/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

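/* Note (added for clarity, not part of the original source): a transmitted
 * packet occupies a chain of TX BDs that bnx2x_free_tx_pkt() below walks in
 * order: the start BD, a parse BD (plus an optional second parse BD when
 * BNX2X_HAS_SECOND_PBD is set), an optional TSO header/data split BD, and
 * then one BD per SKB fragment.  The nbd count read from the start BD tracks
 * how many BDs remain, which is why it is decremented as each non-fragment
 * BD is skipped before the fragment BDs are unmapped.
 */
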
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

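/* Note (added for clarity, not part of the original source): fp->sge_mask is
 * a bit vector of 64-bit elements with one bit per SGE ring entry.
 * bnx2x_update_sge_prod() above clears the bit of each page the FW consumed
 * and then advances rx_sge_prod only in whole 64-entry steps: once every bit
 * of an element has been cleared, the element is reset to "all free" and the
 * producer moves past it, so it never overtakes an entry still in use.
 */
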
/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

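/* Note (added for clarity, not part of the original source): each TPA "bin"
 * (aggregation queue) cycles BNX2X_TPA_STOP -> BNX2X_TPA_START when the
 * start CQE is handled above, and back to BNX2X_TPA_STOP when
 * bnx2x_tpa_stop() processes the end-of-aggregation CQE.  If the replacement
 * buffer cannot be DMA-mapped at start time, the bin is marked
 * BNX2X_TPA_ERROR and the whole aggregation is dropped at stop time.
 */
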
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated from
 * its first packet.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

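/* Note (added for clarity, not part of the original source): a worked example
 * of the gso_size computation above, assuming the usual minimal headers -
 * for an IPv4 aggregation whose first frame carries TCP timestamps,
 * hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66,
 * so gso_size = len_on_bd - 66.
 */
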
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page) {
		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page))
			return -ENOMEM;

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	pool->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
		get_page(pool->page);
	else
		pool->page = NULL;
	return 0;
}

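/* Note (added for clarity, not part of the original source): the page pool
 * above carves SGE buffers out of a single allocated page; an extra page
 * reference is taken whenever the pool keeps the page for further slices,
 * so each outstanding SGE buffer holds its own reference and the page is
 * only released once every slice has been unmapped and freed.
 */
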
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_reset_network_header(skb);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

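/* Note (added for clarity, not part of the original source): bnx2x_rx_int()
 * below walks the RX completion queue.  Slow-path CQEs are handed to
 * bnx2x_sp_event(), TPA start/stop CQEs drive the aggregation helpers above,
 * and regular fast-path CQEs either copy small frames into a fresh skb (when
 * the MTU exceeds ETH_MAX_PACKET_SIZE and the frame fits RX_COPY_THRESH) or
 * hand the DMA buffer to build_skb() and refill the ring with a new fragment.
 */
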
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring,
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_PERCENT_BW(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (fp->mode != TPA_MODE_DISABLED) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->mode = TPA_MODE_DISABLED;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->mode = TPA_MODE_DISABLED;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (fp->mode != TPA_MODE_DISABLED)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

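/* Note (added for clarity, not part of the original source): the MSI-X table
 * built below is laid out as [slowpath] (PF only), then [CNIC] (when
 * supported), then one vector per ETH fast-path queue.  bnx2x_req_msix_irqs()
 * and bnx2x_free_msix_irqs() rely on this ordering when they walk the table
 * with 'offset'.
 */
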
0e8d2ec5 1653int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1654{
1ab4434c 1655 int msix_vec = 0, i, rc;
9f6c9258 1656
1ab4434c
AE
1657 /* VFs don't have a default status block */
1658 if (IS_PF(bp)) {
1659 bp->msix_table[msix_vec].entry = msix_vec;
1660 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1661 bp->msix_table[0].entry);
1662 msix_vec++;
1663 }
9f6c9258 1664
55c11941
MS
1665 /* Cnic requires an msix vector for itself */
1666 if (CNIC_SUPPORT(bp)) {
1667 bp->msix_table[msix_vec].entry = msix_vec;
1668 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1669 msix_vec, bp->msix_table[msix_vec].entry);
1670 msix_vec++;
1671 }
1672
6383c0b3 1673 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1674 for_each_eth_queue(bp, i) {
d6214d7a 1675 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1676 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1677 msix_vec, msix_vec, i);
d6214d7a 1678 msix_vec++;
9f6c9258
DK
1679 }
1680
1ab4434c
AE
1681 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1682 msix_vec);
d6214d7a 1683
a5444b17
AG
1684 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1685 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
9f6c9258
DK
1686 /*
1687 * reconfigure number of tx/rx queues according to available
1688 * MSI-X vectors
1689 */
a5444b17 1690 if (rc == -ENOSPC) {
30a5de77 1691 /* Get by with single vector */
a5444b17
AG
1692 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1693 if (rc < 0) {
30a5de77
DK
1694 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1695 rc);
1696 goto no_msix;
1697 }
1698
1699 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1700 bp->flags |= USING_SINGLE_MSIX_FLAG;
1701
55c11941
MS
1702 BNX2X_DEV_INFO("set number of queues to 1\n");
1703 bp->num_ethernet_queues = 1;
1704 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1705 } else if (rc < 0) {
a5444b17 1706 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1707 goto no_msix;
a5444b17
AG
1708 } else if (rc < msix_vec) {
1709 /* how less vectors we will have? */
1710 int diff = msix_vec - rc;
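 /* e.g. if 8 vectors were requested but only 6 were granted,
  * diff == 2 and two ETH queues are dropped
  */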
1711
1712 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1713
1714 /*
1715 * decrease number of queues by number of unallocated entries
1716 */
1717 bp->num_ethernet_queues -= diff;
1718 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1719
1720 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1721 bp->num_queues);
9f6c9258
DK
1722 }
1723
1724 bp->flags |= USING_MSIX_FLAG;
1725
1726 return 0;
30a5de77
DK
1727
1728no_msix:
 1729 /* fall back to INTx if there was not enough memory */
1730 if (rc == -ENOMEM)
1731 bp->flags |= DISABLE_MSI_FLAG;
1732
1733 return rc;
9f6c9258
DK
1734}
1735
1736static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1737{
ca92429f 1738 int i, rc, offset = 0;
9f6c9258 1739
ad5afc89
AE
1740 /* no default status block for vf */
1741 if (IS_PF(bp)) {
1742 rc = request_irq(bp->msix_table[offset++].vector,
1743 bnx2x_msix_sp_int, 0,
1744 bp->dev->name, bp->dev);
1745 if (rc) {
1746 BNX2X_ERR("request sp irq failed\n");
1747 return -EBUSY;
1748 }
9f6c9258
DK
1749 }
1750
55c11941
MS
1751 if (CNIC_SUPPORT(bp))
1752 offset++;
1753
ec6ba945 1754 for_each_eth_queue(bp, i) {
9f6c9258
DK
1755 struct bnx2x_fastpath *fp = &bp->fp[i];
1756 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1757 bp->dev->name, i);
1758
d6214d7a 1759 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1760 bnx2x_msix_fp_int, 0, fp->name, fp);
1761 if (rc) {
ca92429f
DK
1762 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1763 bp->msix_table[offset].vector, rc);
1764 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1765 return -EBUSY;
1766 }
1767
d6214d7a 1768 offset++;
9f6c9258
DK
1769 }
1770
ec6ba945 1771 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1772 if (IS_PF(bp)) {
1773 offset = 1 + CNIC_SUPPORT(bp);
1774 netdev_info(bp->dev,
1775 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1776 bp->msix_table[0].vector,
1777 0, bp->msix_table[offset].vector,
1778 i - 1, bp->msix_table[offset + i - 1].vector);
1779 } else {
1780 offset = CNIC_SUPPORT(bp);
1781 netdev_info(bp->dev,
1782 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1783 0, bp->msix_table[offset].vector,
1784 i - 1, bp->msix_table[offset + i - 1].vector);
1785 }
9f6c9258
DK
1786 return 0;
1787}
1788
d6214d7a 1789int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1790{
1791 int rc;
1792
1793 rc = pci_enable_msi(bp->pdev);
1794 if (rc) {
51c1a580 1795 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1796 return -1;
1797 }
1798 bp->flags |= USING_MSI_FLAG;
1799
1800 return 0;
1801}
1802
1803static int bnx2x_req_irq(struct bnx2x *bp)
1804{
1805 unsigned long flags;
30a5de77 1806 unsigned int irq;
9f6c9258 1807
30a5de77 1808 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1809 flags = 0;
1810 else
1811 flags = IRQF_SHARED;
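 /* only legacy INTx may be shared with other devices; MSI and MSI-X
  * vectors are exclusive to this function
  */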
1812
30a5de77
DK
1813 if (bp->flags & USING_MSIX_FLAG)
1814 irq = bp->msix_table[0].vector;
1815 else
1816 irq = bp->pdev->irq;
1817
1818 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1819}
1820
c957d09f 1821static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1822{
1823 int rc = 0;
30a5de77
DK
1824 if (bp->flags & USING_MSIX_FLAG &&
1825 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1826 rc = bnx2x_req_msix_irqs(bp);
1827 if (rc)
1828 return rc;
1829 } else {
619c5cb6
VZ
1830 rc = bnx2x_req_irq(bp);
1831 if (rc) {
1832 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1833 return rc;
1834 }
1835 if (bp->flags & USING_MSI_FLAG) {
1836 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1837 netdev_info(bp->dev, "using MSI IRQ %d\n",
1838 bp->dev->irq);
1839 }
1840 if (bp->flags & USING_MSIX_FLAG) {
1841 bp->dev->irq = bp->msix_table[0].vector;
1842 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1843 bp->dev->irq);
619c5cb6
VZ
1844 }
1845 }
1846
1847 return 0;
1848}
1849
55c11941
MS
1850static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1851{
1852 int i;
1853
8f20aa57 1854 for_each_rx_queue_cnic(bp, i) {
55c11941 1855 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1856 }
55c11941
MS
1857}
1858
1191cb83 1859static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1860{
1861 int i;
1862
8f20aa57 1863 for_each_eth_queue(bp, i) {
9f6c9258 1864 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1865 }
9f6c9258
DK
1866}
1867
55c11941
MS
1868static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1869{
1870 int i;
1871
8f20aa57 1872 for_each_rx_queue_cnic(bp, i) {
55c11941 1873 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57 1874 }
55c11941
MS
1875}
1876
1191cb83 1877static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1878{
1879 int i;
1880
8f20aa57 1881 for_each_eth_queue(bp, i) {
9f6c9258 1882 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57 1883 }
9f6c9258
DK
1884}
1885
1886void bnx2x_netif_start(struct bnx2x *bp)
1887{
4b7ed897
DK
1888 if (netif_running(bp->dev)) {
1889 bnx2x_napi_enable(bp);
55c11941
MS
1890 if (CNIC_LOADED(bp))
1891 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1892 bnx2x_int_enable(bp);
1893 if (bp->state == BNX2X_STATE_OPEN)
1894 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1895 }
1896}
1897
1898void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1899{
1900 bnx2x_int_disable_sync(bp, disable_hw);
1901 bnx2x_napi_disable(bp);
55c11941
MS
1902 if (CNIC_LOADED(bp))
1903 bnx2x_napi_disable_cnic(bp);
9f6c9258 1904}
9f6c9258 1905
f663dd9a 1906u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1907 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1908{
8307fa3e 1909 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1910
55c11941 1911 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1912 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1913 u16 ether_type = ntohs(hdr->h_proto);
1914
1915 /* Skip VLAN tag if present */
1916 if (ether_type == ETH_P_8021Q) {
1917 struct vlan_ethhdr *vhdr =
1918 (struct vlan_ethhdr *)skb->data;
1919
1920 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1921 }
1922
1923 /* If ethertype is FCoE or FIP - use FCoE ring */
1924 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1925 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1926 }
55c11941 1927
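 /* the fallback hash may land on any Tx index; the modulo below keeps
  * regular traffic on the ETH rings and off the FCoE ring
  */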
cdb9d6ae 1928 /* select a non-FCoE queue */
3968d389 1929 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
8307fa3e
VZ
1930}
1931
d6214d7a
DK
1932void bnx2x_set_num_queues(struct bnx2x *bp)
1933{
96305234 1934 /* RSS queues */
55c11941 1935 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1936
a3348722 1937 /* override in STORAGE SD modes */
2e98ffc2 1938 if (IS_MF_STORAGE_ONLY(bp))
55c11941
MS
1939 bp->num_ethernet_queues = 1;
1940
ec6ba945 1941 /* Add special queues */
55c11941
MS
1942 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1943 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1944
1945 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1946}
1947
cdb9d6ae
VZ
1948/**
1949 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1950 *
1951 * @bp: Driver handle
1952 *
 1953 * We currently support at most 16 Tx queues for each CoS, thus we will
1954 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1955 * bp->max_cos.
1956 *
1957 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1958 * index after all ETH L2 indices.
1959 *
1960 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1961 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1962 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1963 *
1964 * The proper configuration of skb->queue_mapping is handled by
1965 * bnx2x_select_queue() and __skb_tx_hash().
1966 *
1967 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1968 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
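 *
 * Illustrative example (using the 16-wide groups described above): with
 * 4 ETH queues and max_cos == 2, CoS 0 would map to indices 0..3 and
 * CoS 1 to 16..19, leaving 4..15 and 20..31 as holes.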
1969 */
55c11941 1970static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1971{
6383c0b3 1972 int rc, tx, rx;
ec6ba945 1973
65565884 1974 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1975 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1976
6383c0b3 1977 /* account for the FCoE queue */
55c11941
MS
1978 if (include_cnic && !NO_FCOE(bp)) {
1979 rx++;
1980 tx++;
6383c0b3 1981 }
6383c0b3
AE
1982
1983 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1984 if (rc) {
1985 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1986 return rc;
1987 }
1988 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1989 if (rc) {
1990 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1991 return rc;
1992 }
1993
51c1a580 1994 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1995 tx, rx);
1996
ec6ba945
VZ
1997 return rc;
1998}
1999
1191cb83 2000static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
2001{
2002 int i;
2003
2004 for_each_queue(bp, i) {
2005 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 2006 u32 mtu;
a8c94b91
VZ
2007
2008 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2009 if (IS_FCOE_IDX(i))
2010 /*
 2011 * Although no IP frames are expected to arrive on
 2012 * this ring, we still want to add an
2013 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2014 * overrun attack.
2015 */
e52fcb24 2016 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 2017 else
e52fcb24
ED
2018 mtu = bp->dev->mtu;
2019 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2020 IP_HEADER_ALIGNMENT_PADDING +
e1c6dcca 2021 ETH_OVERHEAD +
e52fcb24
ED
2022 mtu +
2023 BNX2X_FW_RX_ALIGN_END;
9b70de6d 2024 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
16a5fd92 2025 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
2026 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2027 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2028 else
2029 fp->rx_frag_size = 0;
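 /* a zero rx_frag_size presumably makes the Rx allocation path fall
  * back to kmalloc()ed buffers instead of page frags for oversized
  * buffers
  */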
a8c94b91
VZ
2030 }
2031}
2032
60cad4e6 2033static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
2034{
2035 int i;
619c5cb6
VZ
2036 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2037
16a5fd92 2038 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
2039 * enabled
2040 */
5d317c6a
MS
2041 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2042 bp->rss_conf_obj.ind_table[i] =
96305234
DK
2043 bp->fp->cl_id +
2044 ethtool_rxfh_indir_default(i, num_eth_queues);
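 /* ethtool_rxfh_indir_default(i, n) is simply i % n, so the table is
  * filled round-robin across the ETH RSS queues, offset by the leading
  * client id
  */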
619c5cb6
VZ
2045
2046 /*
 2047 * For 57710 and 57711 the SEARCHER configuration (rss_keys) is
 2048 * per-port, so if explicit configuration is needed, do it only
2049 * for a PMF.
2050 *
2051 * For 57712 and newer on the other hand it's a per-function
2052 * configuration.
2053 */
5d317c6a 2054 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2055}
2056
60cad4e6
AE
2057int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2058 bool config_hash, bool enable)
619c5cb6 2059{
3b603066 2060 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2061
2062 /* Although RSS is meaningless when there is a single HW queue we
2063 * still need it enabled in order to have HW Rx hash generated.
2064 *
2065 * if (!is_eth_multi(bp))
2066 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2067 */
2068
96305234 2069 params.rss_obj = rss_obj;
619c5cb6
VZ
2070
2071 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2072
60cad4e6
AE
2073 if (enable) {
2074 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2075
2076 /* RSS configuration */
2077 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2078 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2079 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2080 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2081 if (rss_obj->udp_rss_v4)
2082 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2083 if (rss_obj->udp_rss_v6)
2084 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
e42780b6 2085
28311f8e
YM
2086 if (!CHIP_IS_E1x(bp)) {
2087 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2088 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2089 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2090
e42780b6 2091 /* valid only for TUNN_MODE_GRE tunnel mode */
28311f8e
YM
2092 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2093 }
60cad4e6
AE
2094 } else {
2095 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2096 }
619c5cb6 2097
96305234
DK
2098 /* Hash bits */
2099 params.rss_result_mask = MULTI_MASK;
619c5cb6 2100
5d317c6a 2101 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2102
96305234
DK
2103 if (config_hash) {
2104 /* RSS keys */
e3ec69ca 2105 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2106 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2107 }
2108
60cad4e6
AE
2109 if (IS_PF(bp))
2110 return bnx2x_config_rss(bp, &params);
2111 else
2112 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2113}
2114
1191cb83 2115static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2116{
3b603066 2117 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2118
2119 /* Prepare parameters for function state transitions */
2120 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2121
2122 func_params.f_obj = &bp->func_obj;
2123 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2124
2125 func_params.params.hw_init.load_phase = load_code;
2126
2127 return bnx2x_func_state_change(bp, &func_params);
2128}
2129
2130/*
 2131 * Cleans the objects that have internal lists without sending
16a5fd92 2132 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2133 */
7fa6f340 2134void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2135{
2136 int rc;
2137 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2138 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2139 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2140
2141 /***************** Cleanup MACs' object first *************************/
2142
2143 /* Wait for completion of requested */
2144 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2145 /* Perform a dry cleanup */
2146 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2147
2148 /* Clean ETH primary MAC */
2149 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2150 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2151 &ramrod_flags);
2152 if (rc != 0)
2153 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2154
2155 /* Cleanup UC list */
2156 vlan_mac_flags = 0;
2157 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2158 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2159 &ramrod_flags);
2160 if (rc != 0)
2161 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2162
2163 /***************** Now clean mcast object *****************************/
2164 rparam.mcast_obj = &bp->mcast_obj;
2165 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2166
8b09be5f
YM
2167 /* Add a DEL command... - Since we're doing a driver cleanup only,
2168 * we take a lock surrounding both the initial send and the CONTs,
2169 * as we don't want a true completion to disrupt us in the middle.
2170 */
2171 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2172 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2173 if (rc < 0)
51c1a580
MS
2174 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2175 rc);
619c5cb6
VZ
2176
2177 /* ...and wait until all pending commands are cleared */
2178 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2179 while (rc != 0) {
2180 if (rc < 0) {
2181 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2182 rc);
8b09be5f 2183 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2184 return;
2185 }
2186
2187 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2188 }
8b09be5f 2189 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2190}
2191
2192#ifndef BNX2X_STOP_ON_ERROR
2193#define LOAD_ERROR_EXIT(bp, label) \
2194 do { \
2195 (bp)->state = BNX2X_STATE_ERROR; \
2196 goto label; \
2197 } while (0)
55c11941
MS
2198
2199#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2200 do { \
2201 bp->cnic_loaded = false; \
2202 goto label; \
2203 } while (0)
2204#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2205#define LOAD_ERROR_EXIT(bp, label) \
2206 do { \
2207 (bp)->state = BNX2X_STATE_ERROR; \
2208 (bp)->panic = 1; \
2209 return -EBUSY; \
2210 } while (0)
55c11941
MS
2211#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2212 do { \
2213 bp->cnic_loaded = false; \
2214 (bp)->panic = 1; \
2215 return -EBUSY; \
2216 } while (0)
2217#endif /*BNX2X_STOP_ON_ERROR*/
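/* with BNX2X_STOP_ON_ERROR the macros above latch bp->panic and return
 * immediately instead of jumping to the unwind labels in bnx2x_nic_load()
 */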
619c5cb6 2218
ad5afc89
AE
2219static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2220{
2221 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2222 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2223 return;
2224}
2225
2226static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2227{
8db573ba 2228 int num_groups, vf_headroom = 0;
ad5afc89 2229 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2230
ad5afc89
AE
2231 /* number of queues for statistics is number of eth queues + FCoE */
2232 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2233
ad5afc89
AE
2234 /* Total number of FW statistics requests =
2235 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2236 * and fcoe l2 queue) stats + num of queues (which includes another 1
2237 * for fcoe l2 queue if applicable)
2238 */
2239 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2240
8db573ba
AE
2241 /* vf stats appear in the request list, but their data is allocated by
2242 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2243 * it is used to determine where to place the vf stats queries in the
2244 * request struct
2245 */
2246 if (IS_SRIOV(bp))
6411280a 2247 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2248
ad5afc89
AE
2249 /* Request is built from stats_query_header and an array of
2250 * stats_query_cmd_group each of which contains
 2251 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2252 * configured in the stats_query_header.
2253 */
2254 num_groups =
8db573ba
AE
2255 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2256 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2257 1 : 0));
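 /* i.e. num_groups = DIV_ROUND_UP(fw_stats_num + vf_headroom,
  *                                STATS_QUERY_CMD_COUNT)
  */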
2258
8db573ba
AE
2259 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2260 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2261 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2262 num_groups * sizeof(struct stats_query_cmd_group);
2263
2264 /* Data for statistics requests + stats_counter
2265 * stats_counter holds per-STORM counters that are incremented
2266 * when STORM has finished with the current request.
 2267 * memory for FCoE offloaded statistics is counted anyway,
2268 * even if they will not be sent.
2269 * VF stats are not accounted for here as the data of VF stats is stored
2270 * in memory allocated by the VF, not here.
2271 */
2272 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2273 sizeof(struct per_pf_stats) +
2274 sizeof(struct fcoe_statistics_params) +
2275 sizeof(struct per_queue_stats) * num_queue_stats +
2276 sizeof(struct stats_counter);
2277
cd2b0389
JP
2278 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2279 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2280 if (!bp->fw_stats)
2281 goto alloc_mem_err;
ad5afc89
AE
2282
2283 /* Set shortcuts */
2284 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2285 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2286 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2287 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2288 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2289 bp->fw_stats_req_sz;
2290
6bf07b8e 2291 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2292 U64_HI(bp->fw_stats_req_mapping),
2293 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2294 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2295 U64_HI(bp->fw_stats_data_mapping),
2296 U64_LO(bp->fw_stats_data_mapping));
2297 return 0;
2298
2299alloc_mem_err:
2300 bnx2x_free_fw_stats_mem(bp);
2301 BNX2X_ERR("Can't allocate FW stats memory\n");
2302 return -ENOMEM;
2303}
2304
2305/* send load request to mcp and analyze response */
2306static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2307{
178135c1
DK
2308 u32 param;
2309
ad5afc89
AE
2310 /* init fw_seq */
2311 bp->fw_seq =
2312 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2313 DRV_MSG_SEQ_NUMBER_MASK);
2314 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2315
2316 /* Get current FW pulse sequence */
2317 bp->fw_drv_pulse_wr_seq =
2318 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2319 DRV_PULSE_SEQ_MASK);
2320 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2321
178135c1
DK
2322 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2323
2324 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2325 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2326
ad5afc89 2327 /* load request */
178135c1 2328 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2329
2330 /* if mcp fails to respond we must abort */
2331 if (!(*load_code)) {
2332 BNX2X_ERR("MCP response failure, aborting\n");
2333 return -EBUSY;
2334 }
2335
2336 /* If mcp refused (e.g. other port is in diagnostic mode) we
2337 * must abort
2338 */
2339 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2340 BNX2X_ERR("MCP refused load request, aborting\n");
2341 return -EBUSY;
2342 }
2343 return 0;
2344}
2345
2346/* check whether another PF has already loaded FW to the chip. In
 2347 * virtualized environments a PF from another VM may have already
 2348 * initialized the device, including loading the FW.
2349 */
91ebb929 2350int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2351{
2352 /* is another pf loaded on this engine? */
2353 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2354 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2355 /* build my FW version dword */
2356 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2357 (BCM_5710_FW_MINOR_VERSION << 8) +
2358 (BCM_5710_FW_REVISION_VERSION << 16) +
2359 (BCM_5710_FW_ENGINEERING_VERSION << 24);
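 /* the dword packs major | minor << 8 | rev << 16 | eng << 24, which
  * appears to match the layout the FW exposes at XSEM_REG_PRAM
  */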
2360
2361 /* read loaded FW from chip */
2362 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2363
2364 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2365 loaded_fw, my_fw);
2366
2367 /* abort nic load if version mismatch */
2368 if (my_fw != loaded_fw) {
91ebb929
YM
2369 if (print_err)
2370 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2371 loaded_fw, my_fw);
2372 else
2373 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2374 loaded_fw, my_fw);
ad5afc89
AE
2375 return -EBUSY;
2376 }
2377 }
2378 return 0;
2379}
2380
2381/* returns the "mcp load_code" according to global load_count array */
2382static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2383{
2384 int path = BP_PATH(bp);
2385
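 /* bnx2x_load_count[path] holds { total loads on this path,
  * loads on port 0, loads on port 1 }
  */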
2386 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2387 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2388 bnx2x_load_count[path][2]);
2389 bnx2x_load_count[path][0]++;
2390 bnx2x_load_count[path][1 + port]++;
ad5afc89 2391 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2392 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2393 bnx2x_load_count[path][2]);
2394 if (bnx2x_load_count[path][0] == 1)
ad5afc89 2395 return FW_MSG_CODE_DRV_LOAD_COMMON;
a8f47eb7 2396 else if (bnx2x_load_count[path][1 + port] == 1)
ad5afc89
AE
2397 return FW_MSG_CODE_DRV_LOAD_PORT;
2398 else
2399 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2400}
2401
2402/* mark PMF if applicable */
2403static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2404{
2405 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2406 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2407 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2408 bp->port.pmf = 1;
2409 /* We need the barrier to ensure the ordering between the
2410 * writing to bp->port.pmf here and reading it from the
2411 * bnx2x_periodic_task().
2412 */
2413 smp_mb();
2414 } else {
2415 bp->port.pmf = 0;
452427b0
YM
2416 }
2417
ad5afc89
AE
2418 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2419}
2420
2421static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2422{
2423 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2424 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2425 (bp->common.shmem2_base)) {
2426 if (SHMEM2_HAS(bp, dcc_support))
2427 SHMEM2_WR(bp, dcc_support,
2428 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2429 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2430 if (SHMEM2_HAS(bp, afex_driver_support))
2431 SHMEM2_WR(bp, afex_driver_support,
2432 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2433 }
2434
2435 /* Set AFEX default VLAN tag to an invalid value */
2436 bp->afex_def_vlan_tag = -1;
452427b0
YM
2437}
2438
1191cb83
ED
2439/**
2440 * bnx2x_bz_fp - zero content of the fastpath structure.
2441 *
2442 * @bp: driver handle
2443 * @index: fastpath index to be zeroed
2444 *
 2445 * Makes sure the contents of bp->fp[index].napi are kept
 2446 * intact.
2447 */
2448static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2449{
2450 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2451 int cos;
1191cb83 2452 struct napi_struct orig_napi = fp->napi;
15192a8c 2453 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2454
1191cb83 2455 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2456 if (fp->tpa_info)
2457 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2458 sizeof(struct bnx2x_agg_info));
2459 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2460
2461 /* Restore the NAPI object as it has been already initialized */
2462 fp->napi = orig_napi;
15192a8c 2463 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2464 fp->bp = bp;
2465 fp->index = index;
2466 if (IS_ETH_FP(fp))
2467 fp->max_cos = bp->max_cos;
2468 else
2469 /* Special queues support only one CoS */
2470 fp->max_cos = 1;
2471
65565884 2472 /* Init txdata pointers */
65565884
MS
2473 if (IS_FCOE_FP(fp))
2474 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2475 if (IS_ETH_FP(fp))
2476 for_each_cos_in_tx_queue(fp, cos)
2477 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2478 BNX2X_NUM_ETH_QUEUES(bp) + index];
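 /* bp->bnx2x_txq is grouped by CoS: all CoS 0 txdata first, then CoS 1,
  * and so on, so queue 'index' at a given 'cos' lives at
  * cos * BNX2X_NUM_ETH_QUEUES(bp) + index
  */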
2479
16a5fd92 2480 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2481 * minimal size so it must be set prior to queue memory allocation
2482 */
f8dcb5e3 2483 if (bp->dev->features & NETIF_F_LRO)
1191cb83 2484 fp->mode = TPA_MODE_LRO;
f8dcb5e3 2485 else if (bp->dev->features & NETIF_F_GRO &&
7e6b4d44 2486 bnx2x_mtu_allows_gro(bp->dev->mtu))
1191cb83 2487 fp->mode = TPA_MODE_GRO;
7e6b4d44
MS
2488 else
2489 fp->mode = TPA_MODE_DISABLED;
1191cb83 2490
22a8f237
MS
2491 /* We don't want TPA if it's disabled in bp
2492 * or if this is an FCoE L2 ring.
2493 */
2494 if (bp->disable_tpa || IS_FCOE_FP(fp))
7e6b4d44 2495 fp->mode = TPA_MODE_DISABLED;
55c11941
MS
2496}
2497
230d00eb
YM
2498void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2499{
2500 u32 cur;
2501
2502 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2503 return;
2504
2505 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2506 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2507 cur, state);
2508
2509 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2510}
2511
55c11941
MS
2512int bnx2x_load_cnic(struct bnx2x *bp)
2513{
2514 int i, rc, port = BP_PORT(bp);
2515
2516 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2517
2518 mutex_init(&bp->cnic_mutex);
2519
ad5afc89
AE
2520 if (IS_PF(bp)) {
2521 rc = bnx2x_alloc_mem_cnic(bp);
2522 if (rc) {
2523 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2524 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2525 }
55c11941
MS
2526 }
2527
2528 rc = bnx2x_alloc_fp_mem_cnic(bp);
2529 if (rc) {
2530 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2531 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2532 }
2533
2534 /* Update the number of queues with the cnic queues */
2535 rc = bnx2x_set_real_num_queues(bp, 1);
2536 if (rc) {
2537 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2538 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2539 }
2540
2541 /* Add all CNIC NAPI objects */
2542 bnx2x_add_all_napi_cnic(bp);
2543 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2544 bnx2x_napi_enable_cnic(bp);
2545
2546 rc = bnx2x_init_hw_func_cnic(bp);
2547 if (rc)
2548 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2549
2550 bnx2x_nic_init_cnic(bp);
2551
ad5afc89
AE
2552 if (IS_PF(bp)) {
2553 /* Enable Timer scan */
2554 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2555
2556 /* setup cnic queues */
2557 for_each_cnic_queue(bp, i) {
2558 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2559 if (rc) {
2560 BNX2X_ERR("Queue setup failed\n");
2561 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2562 }
55c11941
MS
2563 }
2564 }
2565
2566 /* Initialize Rx filter. */
8b09be5f 2567 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2568
2569 /* re-read iscsi info */
2570 bnx2x_get_iscsi_info(bp);
2571 bnx2x_setup_cnic_irq_info(bp);
2572 bnx2x_setup_cnic_info(bp);
2573 bp->cnic_loaded = true;
2574 if (bp->state == BNX2X_STATE_OPEN)
2575 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2576
55c11941
MS
2577 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2578
2579 return 0;
2580
2581#ifndef BNX2X_STOP_ON_ERROR
2582load_error_cnic2:
2583 /* Disable Timer scan */
2584 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2585
2586load_error_cnic1:
2587 bnx2x_napi_disable_cnic(bp);
2588 /* Update the number of queues without the cnic queues */
d9d81862 2589 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2590 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2591load_error_cnic0:
2592 BNX2X_ERR("CNIC-related load failed\n");
2593 bnx2x_free_fp_mem_cnic(bp);
2594 bnx2x_free_mem_cnic(bp);
2595 return rc;
2596#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2597}
2598
9f6c9258
DK
2599/* must be called with rtnl_lock */
2600int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2601{
619c5cb6 2602 int port = BP_PORT(bp);
ad5afc89 2603 int i, rc = 0, load_code = 0;
9f6c9258 2604
55c11941
MS
2605 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2606 DP(NETIF_MSG_IFUP,
2607 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2608
9f6c9258 2609#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2610 if (unlikely(bp->panic)) {
2611 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2612 return -EPERM;
51c1a580 2613 }
9f6c9258
DK
2614#endif
2615
2616 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2617
16a5fd92 2618 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2619 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2620 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2621 &bp->last_reported_link.link_report_flags);
2ae17f66 2622
ad5afc89
AE
2623 if (IS_PF(bp))
2624 /* must be called before memory allocation and HW init */
2625 bnx2x_ilt_set_info(bp);
523224a3 2626
6383c0b3
AE
2627 /*
2628 * Zero fastpath structures preserving invariants like napi, which are
2629 * allocated only once, fp index, max_cos, bp pointer.
7e6b4d44 2630 * Also set fp->mode and txdata_ptr.
b3b83c3f 2631 */
51c1a580 2632 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2633 for_each_queue(bp, i)
2634 bnx2x_bz_fp(bp, i);
55c11941
MS
2635 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2636 bp->num_cnic_queues) *
2637 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2638
55c11941 2639 bp->fcoe_init = false;
6383c0b3 2640
a8c94b91
VZ
2641 /* Set the receive queues buffer size */
2642 bnx2x_set_rx_buf_size(bp);
2643
ad5afc89
AE
2644 if (IS_PF(bp)) {
2645 rc = bnx2x_alloc_mem(bp);
2646 if (rc) {
2647 BNX2X_ERR("Unable to allocate bp memory\n");
2648 return rc;
2649 }
2650 }
2651
ad5afc89
AE
2652 /* need to be done after alloc mem, since it's self adjusting to amount
2653 * of memory available for RSS queues
2654 */
2655 rc = bnx2x_alloc_fp_mem(bp);
2656 if (rc) {
2657 BNX2X_ERR("Unable to allocate memory for fps\n");
2658 LOAD_ERROR_EXIT(bp, load_error0);
2659 }
d6214d7a 2660
e3ed4eae
DK
2661 /* Allocated memory for FW statistics */
2662 if (bnx2x_alloc_fw_stats_mem(bp))
2663 LOAD_ERROR_EXIT(bp, load_error0);
2664
8d9ac297
AE
2665 /* request pf to initialize status blocks */
2666 if (IS_VF(bp)) {
2667 rc = bnx2x_vfpf_init(bp);
2668 if (rc)
2669 LOAD_ERROR_EXIT(bp, load_error0);
2670 }
2671
b3b83c3f
DK
 2672 /* Since bnx2x_alloc_mem() may update
2673 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2674 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2675 */
55c11941 2676 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2677 if (rc) {
ec6ba945 2678 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2679 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2680 }
2681
6383c0b3 2682 /* configure multi cos mappings in kernel.
16a5fd92
YM
 2683 * This configuration may be overridden by a multi-class queueing
 2684 * discipline or by a DCBX negotiation result.
6383c0b3
AE
2685 */
2686 bnx2x_setup_tc(bp->dev, bp->max_cos);
2687
26614ba5
MS
2688 /* Add all NAPI objects */
2689 bnx2x_add_all_napi(bp);
55c11941 2690 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2691 bnx2x_napi_enable(bp);
2692
ad5afc89
AE
2693 if (IS_PF(bp)) {
2694 /* set pf load just before approaching the MCP */
2695 bnx2x_set_pf_load(bp);
2696
2697 /* if mcp exists send load request and analyze response */
2698 if (!BP_NOMCP(bp)) {
2699 /* attempt to load pf */
2700 rc = bnx2x_nic_load_request(bp, &load_code);
2701 if (rc)
2702 LOAD_ERROR_EXIT(bp, load_error1);
2703
2704 /* what did mcp say? */
91ebb929 2705 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2706 if (rc) {
2707 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2708 LOAD_ERROR_EXIT(bp, load_error2);
2709 }
ad5afc89
AE
2710 } else {
2711 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2712 }
9f6c9258 2713
ad5afc89
AE
2714 /* mark pmf if applicable */
2715 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2716
ad5afc89
AE
2717 /* Init Function state controlling object */
2718 bnx2x__init_func_obj(bp);
6383c0b3 2719
ad5afc89
AE
2720 /* Initialize HW */
2721 rc = bnx2x_init_hw(bp, load_code);
2722 if (rc) {
2723 BNX2X_ERR("HW init failed, aborting\n");
2724 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2725 LOAD_ERROR_EXIT(bp, load_error2);
2726 }
9f6c9258
DK
2727 }
2728
ecf01c22
YM
2729 bnx2x_pre_irq_nic_init(bp);
2730
d6214d7a
DK
2731 /* Connect to IRQs */
2732 rc = bnx2x_setup_irqs(bp);
523224a3 2733 if (rc) {
ad5afc89
AE
2734 BNX2X_ERR("setup irqs failed\n");
2735 if (IS_PF(bp))
2736 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2737 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2738 }
2739
619c5cb6 2740 /* Init per-function objects */
ad5afc89 2741 if (IS_PF(bp)) {
ecf01c22
YM
2742 /* Setup NIC internals and enable interrupts */
2743 bnx2x_post_irq_nic_init(bp, load_code);
2744
ad5afc89 2745 bnx2x_init_bp_objs(bp);
b56e9670 2746 bnx2x_iov_nic_init(bp);
a3348722 2747
ad5afc89
AE
2748 /* Set AFEX default VLAN tag to an invalid value */
2749 bp->afex_def_vlan_tag = -1;
2750 bnx2x_nic_load_afex_dcc(bp, load_code);
2751 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2752 rc = bnx2x_func_start(bp);
2753 if (rc) {
2754 BNX2X_ERR("Function start failed!\n");
2755 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2756
619c5cb6 2757 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2758 }
9f6c9258 2759
ad5afc89
AE
2760 /* Send LOAD_DONE command to MCP */
2761 if (!BP_NOMCP(bp)) {
2762 load_code = bnx2x_fw_command(bp,
2763 DRV_MSG_CODE_LOAD_DONE, 0);
2764 if (!load_code) {
2765 BNX2X_ERR("MCP response failure, aborting\n");
2766 rc = -EBUSY;
2767 LOAD_ERROR_EXIT(bp, load_error3);
2768 }
2769 }
9f6c9258 2770
0c14e5ce
AE
2771 /* initialize FW coalescing state machines in RAM */
2772 bnx2x_update_coalesce(bp);
60cad4e6 2773 }
0c14e5ce 2774
60cad4e6
AE
2775 /* setup the leading queue */
2776 rc = bnx2x_setup_leading(bp);
2777 if (rc) {
2778 BNX2X_ERR("Setup leading failed!\n");
2779 LOAD_ERROR_EXIT(bp, load_error3);
2780 }
ad5afc89 2781
60cad4e6
AE
2782 /* set up the rest of the queues */
2783 for_each_nondefault_eth_queue(bp, i) {
2784 if (IS_PF(bp))
2785 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2786 else /* VF */
2787 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2788 if (rc) {
60cad4e6 2789 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2790 LOAD_ERROR_EXIT(bp, load_error3);
2791 }
60cad4e6 2792 }
8d9ac297 2793
60cad4e6
AE
2794 /* setup rss */
2795 rc = bnx2x_init_rss(bp);
2796 if (rc) {
2797 BNX2X_ERR("PF RSS init failed\n");
2798 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2799 }
619c5cb6 2800
523224a3
DK
2801 /* Now when Clients are configured we are ready to work */
2802 bp->state = BNX2X_STATE_OPEN;
2803
619c5cb6 2804 /* Configure a ucast MAC */
ad5afc89
AE
2805 if (IS_PF(bp))
2806 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2807 else /* vf */
f8f4f61a
DK
2808 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2809 true);
51c1a580
MS
2810 if (rc) {
2811 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2812 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2813 }
6e30dd4e 2814
ad5afc89 2815 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2816 bnx2x_update_max_mf_config(bp, bp->pending_max);
2817 bp->pending_max = 0;
2818 }
2819
ad5afc89
AE
2820 if (bp->port.pmf) {
2821 rc = bnx2x_initial_phy_init(bp, load_mode);
2822 if (rc)
2823 LOAD_ERROR_EXIT(bp, load_error3);
2824 }
c63da990 2825 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2826
619c5cb6
VZ
2827 /* Start fast path */
2828
05cc5a39
YM
2829 /* Re-configure vlan filters */
2830 rc = bnx2x_vlan_reconfigure_vid(bp);
2831 if (rc)
2832 LOAD_ERROR_EXIT(bp, load_error3);
2833
619c5cb6 2834 /* Initialize Rx filter. */
8b09be5f 2835 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2836
eeed018c
MK
2837 if (bp->flags & PTP_SUPPORTED) {
2838 bnx2x_init_ptp(bp);
2839 bnx2x_configure_ptp_filters(bp);
2840 }
2841 /* Start Tx */
9f6c9258
DK
2842 switch (load_mode) {
2843 case LOAD_NORMAL:
16a5fd92 2844 /* Tx queue should be only re-enabled */
523224a3 2845 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2846 break;
2847
2848 case LOAD_OPEN:
2849 netif_tx_start_all_queues(bp->dev);
4e857c58 2850 smp_mb__after_atomic();
9f6c9258
DK
2851 break;
2852
2853 case LOAD_DIAG:
8970b2e4 2854 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2855 bp->state = BNX2X_STATE_DIAG;
2856 break;
2857
2858 default:
2859 break;
2860 }
2861
00253a8c 2862 if (bp->port.pmf)
4c704899 2863 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2864 else
9f6c9258
DK
2865 bnx2x__link_status_update(bp);
2866
2867 /* start the timer */
2868 mod_timer(&bp->timer, jiffies + bp->current_interval);
2869
55c11941
MS
2870 if (CNIC_ENABLED(bp))
2871 bnx2x_load_cnic(bp);
9f6c9258 2872
42f8277f
YM
2873 if (IS_PF(bp))
2874 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2875
ad5afc89
AE
2876 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2877 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2878 u32 val;
2879 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
230d00eb
YM
2880 val &= ~DRV_FLAGS_MTU_MASK;
2881 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
9ce392d4
YM
2882 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2883 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2884 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2885 }
2886
619c5cb6 2887 /* Wait for all pending SP commands to complete */
ad5afc89 2888 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2889 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2890 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2891 return -EBUSY;
2892 }
6891dd25 2893
c48f350f
YM
2894 /* Update driver data for On-Chip MFW dump. */
2895 if (IS_PF(bp))
2896 bnx2x_update_mfw_dump(bp);
2897
9876879f
BW
2898 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2899 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2900 bnx2x_dcbx_init(bp, false);
2901
230d00eb
YM
2902 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2903 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2904
55c11941
MS
2905 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2906
9f6c9258
DK
2907 return 0;
2908
619c5cb6 2909#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2910load_error3:
ad5afc89
AE
2911 if (IS_PF(bp)) {
2912 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2913
ad5afc89
AE
2914 /* Clean queueable objects */
2915 bnx2x_squeeze_objects(bp);
2916 }
619c5cb6 2917
9f6c9258
DK
2918 /* Free SKBs, SGEs, TPA pool and driver internals */
2919 bnx2x_free_skbs(bp);
ec6ba945 2920 for_each_rx_queue(bp, i)
9f6c9258 2921 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2922
9f6c9258 2923 /* Release IRQs */
d6214d7a
DK
2924 bnx2x_free_irq(bp);
2925load_error2:
ad5afc89 2926 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2927 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2928 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2929 }
2930
2931 bp->port.pmf = 0;
9f6c9258
DK
2932load_error1:
2933 bnx2x_napi_disable(bp);
722c6f58 2934 bnx2x_del_all_napi(bp);
ad5afc89 2935
889b9af3 2936 /* clear pf_load status, as it was already set */
ad5afc89
AE
2937 if (IS_PF(bp))
2938 bnx2x_clear_pf_load(bp);
d6214d7a 2939load_error0:
ad5afc89 2940 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2941 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2942 bnx2x_free_mem(bp);
2943
2944 return rc;
619c5cb6 2945#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2946}
2947
7fa6f340 2948int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2949{
2950 u8 rc = 0, cos, i;
2951
2952 /* Wait until tx fastpath tasks complete */
2953 for_each_tx_queue(bp, i) {
2954 struct bnx2x_fastpath *fp = &bp->fp[i];
2955
2956 for_each_cos_in_tx_queue(fp, cos)
2957 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2958 if (rc)
2959 return rc;
2960 }
2961 return 0;
2962}
2963
9f6c9258 2964/* must be called with rtnl_lock */
5d07d868 2965int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2966{
2967 int i;
c9ee9206
VZ
2968 bool global = false;
2969
55c11941
MS
2970 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2971
230d00eb
YM
2972 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2973 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2974
9ce392d4 2975 /* mark driver is unloaded in shmem2 */
ad5afc89 2976 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2977 u32 val;
2978 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2979 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2980 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2981 }
2982
80bfe5cc 2983 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2984 (bp->state == BNX2X_STATE_CLOSED ||
2985 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2986 /* We can get here if the driver has been unloaded
2987 * during parity error recovery and is either waiting for a
2988 * leader to complete or for other functions to unload and
2989 * then ifdown has been issued. In this case we want to
 2990 * unload and let other functions complete a recovery
2991 * process.
2992 */
9f6c9258
DK
2993 bp->recovery_state = BNX2X_RECOVERY_DONE;
2994 bp->is_leader = 0;
c9ee9206
VZ
2995 bnx2x_release_leader_lock(bp);
2996 smp_mb();
2997
51c1a580
MS
2998 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2999 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
3000 return -EINVAL;
3001 }
3002
80bfe5cc 3003 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 3004 * did not complete successfully - all resources are released.
80bfe5cc
YM
3005 *
 3006 * We can get here only after an unsuccessful ndo_* callback, during which
3007 * dev->IFF_UP flag is still on.
3008 */
3009 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3010 return 0;
3011
3012 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
3013 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3014 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3015 */
3016 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3017 smp_mb();
3018
78c3bcc5
AE
3019 /* indicate to VFs that the PF is going down */
3020 bnx2x_iov_channel_down(bp);
3021
55c11941
MS
3022 if (CNIC_LOADED(bp))
3023 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3024
9505ee37
VZ
3025 /* Stop Tx */
3026 bnx2x_tx_disable(bp);
65565884 3027 netdev_reset_tc(bp->dev);
9505ee37 3028
9f6c9258 3029 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 3030
9f6c9258 3031 del_timer_sync(&bp->timer);
f85582f8 3032
ad5afc89
AE
3033 if (IS_PF(bp)) {
3034 /* Set ALWAYS_ALIVE bit in shmem */
3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3036 bnx2x_drv_pulse(bp);
3037 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3038 bnx2x_save_statistics(bp);
3039 }
9f6c9258 3040
d78a1f08
YM
3041 /* wait till consumers catch up with producers in all queues.
3042 * If we're recovering, FW can't write to host so no reason
3043 * to wait for the queues to complete all Tx.
3044 */
3045 if (unload_mode != UNLOAD_RECOVERY)
3046 bnx2x_drain_tx_queues(bp);
9f6c9258 3047
9b176b6b
AE
3048 /* if VF indicate to PF this function is going down (PF will delete sp
3049 * elements and clear initializations
3050 */
3051 if (IS_VF(bp))
3052 bnx2x_vfpf_close_vf(bp);
3053 else if (unload_mode != UNLOAD_RECOVERY)
3054 /* if this is a normal/close unload need to clean up chip*/
5d07d868 3055 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 3056 else {
c9ee9206
VZ
3057 /* Send the UNLOAD_REQUEST to the MCP */
3058 bnx2x_send_unload_req(bp, unload_mode);
3059
16a5fd92 3060 /* Prevent transactions to host from the functions on the
c9ee9206 3061 * engine that doesn't reset global blocks in case of global
16a5fd92 3062 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
 3063 * (the engine whose leader will perform the recovery
3064 * last).
3065 */
3066 if (!CHIP_IS_E1x(bp))
3067 bnx2x_pf_disable(bp);
3068
3069 /* Disable HW interrupts, NAPI */
523224a3 3070 bnx2x_netif_stop(bp, 1);
26614ba5
MS
3071 /* Delete all NAPI objects */
3072 bnx2x_del_all_napi(bp);
55c11941
MS
3073 if (CNIC_LOADED(bp))
3074 bnx2x_del_all_napi_cnic(bp);
523224a3 3075 /* Release IRQs */
d6214d7a 3076 bnx2x_free_irq(bp);
c9ee9206
VZ
3077
3078 /* Report UNLOAD_DONE to MCP */
5d07d868 3079 bnx2x_send_unload_done(bp, false);
523224a3 3080 }
9f6c9258 3081
619c5cb6 3082 /*
16a5fd92 3083 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
3084 * the queueable objects here in case they failed to get cleaned so far.
3085 */
ad5afc89
AE
3086 if (IS_PF(bp))
3087 bnx2x_squeeze_objects(bp);
619c5cb6 3088
79616895
VZ
3089 /* There should be no more pending SP commands at this stage */
3090 bp->sp_state = 0;
3091
9f6c9258
DK
3092 bp->port.pmf = 0;
3093
a0d307b2
DK
3094 /* clear pending work in rtnl task */
3095 bp->sp_rtnl_state = 0;
3096 smp_mb();
3097
9f6c9258
DK
3098 /* Free SKBs, SGEs, TPA pool and driver internals */
3099 bnx2x_free_skbs(bp);
55c11941
MS
3100 if (CNIC_LOADED(bp))
3101 bnx2x_free_skbs_cnic(bp);
ec6ba945 3102 for_each_rx_queue(bp, i)
9f6c9258 3103 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3104
ad5afc89
AE
3105 bnx2x_free_fp_mem(bp);
3106 if (CNIC_LOADED(bp))
55c11941 3107 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3108
ad5afc89 3109 if (IS_PF(bp)) {
ad5afc89
AE
3110 if (CNIC_LOADED(bp))
3111 bnx2x_free_mem_cnic(bp);
3112 }
b4cddbd6
AE
3113 bnx2x_free_mem(bp);
3114
9f6c9258 3115 bp->state = BNX2X_STATE_CLOSED;
55c11941 3116 bp->cnic_loaded = false;
9f6c9258 3117
42f8277f
YM
3118 /* Clear driver version indication in shmem */
3119 if (IS_PF(bp))
3120 bnx2x_update_mng_version(bp);
3121
c9ee9206
VZ
3122 /* Check if there are pending parity attentions. If there are - set
3123 * RECOVERY_IN_PROGRESS.
3124 */
ad5afc89 3125 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3126 bnx2x_set_reset_in_progress(bp);
3127
3128 /* Set RESET_IS_GLOBAL if needed */
3129 if (global)
3130 bnx2x_set_reset_global(bp);
3131 }
3132
9f6c9258
DK
3133 /* The last driver must disable a "close the gate" if there is no
3134 * parity attention or "process kill" pending.
3135 */
ad5afc89
AE
3136 if (IS_PF(bp) &&
3137 !bnx2x_clear_pf_load(bp) &&
3138 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3139 bnx2x_disable_close_the_gate(bp);
3140
55c11941
MS
3141 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3142
9f6c9258
DK
3143 return 0;
3144}
f85582f8 3145
9f6c9258
DK
3146int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3147{
3148 u16 pmcsr;
3149
adf5f6a1 3150 /* If there is no power capability, silently succeed */
29ed74c3 3151 if (!bp->pdev->pm_cap) {
51c1a580 3152 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3153 return 0;
3154 }
3155
29ed74c3 3156 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3157
3158 switch (state) {
3159 case PCI_D0:
29ed74c3 3160 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3161 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3162 PCI_PM_CTRL_PME_STATUS));
3163
3164 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3165 /* delay required during transition out of D3hot */
3166 msleep(20);
3167 break;
3168
3169 case PCI_D3hot:
 3170 /* If there are other clients above, don't
 3171 shut down the power */
3172 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3173 return 0;
3174 /* Don't shut down the power for emulation and FPGA */
3175 if (CHIP_REV_IS_SLOW(bp))
3176 return 0;
3177
3178 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3179 pmcsr |= 3;
3180
3181 if (bp->wol)
3182 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3183
29ed74c3 3184 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3185 pmcsr);
3186
3187 /* No more memory access after this point until
3188 * device is brought back to D0.
3189 */
3190 break;
3191
3192 default:
51c1a580 3193 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3194 return -EINVAL;
3195 }
3196 return 0;
3197}
3198
9f6c9258
DK
3199/*
3200 * net_device service functions
3201 */
a8f47eb7 3202static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258 3203{
9f6c9258
DK
3204 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3205 napi);
3206 struct bnx2x *bp = fp->bp;
4d6acb62
ED
3207 int rx_work_done;
3208 u8 cos;
9f6c9258 3209
9f6c9258 3210#ifdef BNX2X_STOP_ON_ERROR
4d6acb62
ED
3211 if (unlikely(bp->panic)) {
3212 napi_complete(napi);
3213 return 0;
3214 }
9f6c9258 3215#endif
4d6acb62
ED
3216 for_each_cos_in_tx_queue(fp, cos)
3217 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3218 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
9f6c9258 3219
4d6acb62 3220 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
55c11941 3221
4d6acb62
ED
3222 if (rx_work_done < budget) {
3223 /* No need to update SB for FCoE L2 ring as long as
3224 * it's connected to the default SB and the SB
3225 * has been updated when NAPI was scheduled.
3226 */
3227 if (IS_FCOE_FP(fp)) {
6ad20165 3228 napi_complete_done(napi, rx_work_done);
4d6acb62 3229 } else {
9f6c9258 3230 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3231 /* bnx2x_has_rx_work() reads the status block,
3232 * thus we need to ensure that status block indices
3233 * have been actually read (bnx2x_update_fpsb_idx)
3234 * prior to this check (bnx2x_has_rx_work) so that
3235 * we won't write the "newer" value of the status block
3236 * to IGU (if there was a DMA right after
3237 * bnx2x_has_rx_work and if there is no rmb, the memory
3238 * reading (bnx2x_update_fpsb_idx) may be postponed
3239 * to right before bnx2x_ack_sb). In this case there
3240 * will never be another interrupt until there is
3241 * another update of the status block, while there
3242 * is still unhandled work.
3243 */
9f6c9258
DK
3244 rmb();
3245
3246 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
80f1c21c
ED
3247 if (napi_complete_done(napi, rx_work_done)) {
3248 /* Re-enable interrupts */
3249 DP(NETIF_MSG_RX_STATUS,
3250 "Update index to %d\n", fp->fp_hc_idx);
3251 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3252 le16_to_cpu(fp->fp_hc_idx),
3253 IGU_INT_ENABLE, 1);
3254 }
4d6acb62
ED
3255 } else {
3256 rx_work_done = budget;
9f6c9258
DK
3257 }
3258 }
3259 }
3260
4d6acb62 3261 return rx_work_done;
9f6c9258
DK
3262}
3263
9f6c9258
DK
3264/* we split the first BD into headers and data BDs
 3265 * to ease the pain of our fellow microcode engineers;
 3266 * we use one mapping for both BDs
9f6c9258 3267 */
91226790
DK
3268static u16 bnx2x_tx_split(struct bnx2x *bp,
3269 struct bnx2x_fp_txdata *txdata,
3270 struct sw_tx_bd *tx_buf,
3271 struct eth_tx_start_bd **tx_bd, u16 hlen,
3272 u16 bd_prod)
9f6c9258
DK
3273{
3274 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3275 struct eth_tx_bd *d_tx_bd;
3276 dma_addr_t mapping;
3277 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3278
3279 /* first fix first BD */
9f6c9258
DK
3280 h_tx_bd->nbytes = cpu_to_le16(hlen);
3281
91226790
DK
3282 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3283 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3284
3285 /* now get a new data BD
3286 * (after the pbd) and fill it */
3287 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3288 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3289
3290 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3291 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3292
3293 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3294 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3295 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3296
3297 /* this marks the BD as one that has no individual mapping */
3298 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3299
3300 DP(NETIF_MSG_TX_QUEUED,
3301 "TSO split data size is %d (%x:%x)\n",
3302 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3303
3304 /* update tx_bd */
3305 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3306
3307 return bd_prod;
3308}
3309
86564c3f
YM
3310#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3311#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3312static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3313{
86564c3f
YM
3314 __sum16 tsum = (__force __sum16) csum;
3315
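 /* adjust the partial checksum by |fix| bytes: a positive fix folds out
  * the bytes just before t_header, a negative fix folds in the bytes
  * starting at it
  */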
9f6c9258 3316 if (fix > 0)
86564c3f
YM
3317 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3318 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3319
3320 else if (fix < 0)
86564c3f
YM
3321 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3322 csum_partial(t_header, -fix, 0)));
9f6c9258 3323
e2593fcd 3324 return bswab16(tsum);
9f6c9258
DK
3325}
3326
91226790 3327static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3328{
3329 u32 rc;
a848ade4
DK
3330 __u8 prot = 0;
3331 __be16 protocol;
9f6c9258
DK
3332
3333 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3334 return XMIT_PLAIN;
9f6c9258 3335
a848ade4
DK
3336 protocol = vlan_get_protocol(skb);
3337 if (protocol == htons(ETH_P_IPV6)) {
3338 rc = XMIT_CSUM_V6;
3339 prot = ipv6_hdr(skb)->nexthdr;
3340 } else {
3341 rc = XMIT_CSUM_V4;
3342 prot = ip_hdr(skb)->protocol;
3343 }
9f6c9258 3344
a848ade4
DK
3345 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3346 if (inner_ip_hdr(skb)->version == 6) {
3347 rc |= XMIT_CSUM_ENC_V6;
3348 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3349 rc |= XMIT_CSUM_TCP;
9f6c9258 3350 } else {
a848ade4
DK
3351 rc |= XMIT_CSUM_ENC_V4;
3352 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3353 rc |= XMIT_CSUM_TCP;
3354 }
3355 }
a848ade4
DK
3356 if (prot == IPPROTO_TCP)
3357 rc |= XMIT_CSUM_TCP;
9f6c9258 3358
36a8f39e
ED
3359 if (skb_is_gso(skb)) {
3360 if (skb_is_gso_v6(skb)) {
3361 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3362 if (rc & XMIT_CSUM_ENC)
3363 rc |= XMIT_GSO_ENC_V6;
3364 } else {
3365 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3366 if (rc & XMIT_CSUM_ENC)
3367 rc |= XMIT_GSO_ENC_V4;
3368 }
a848ade4 3369 }
9f6c9258
DK
3370
3371 return rc;
3372}
3373
ea2465af
YM
3374/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3375#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3376
3377/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3378#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3379
3380#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3381/* check if packet requires linearization (packet is too fragmented)
3382 no need to check fragmentation if page size > 8K (there will be no
 3383 violation of FW restrictions) */
3384static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3385 u32 xmit_type)
3386{
ea2465af
YM
3387 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3388 int to_copy = 0, hlen = 0;
9f6c9258 3389
ea2465af
YM
3390 if (xmit_type & XMIT_GSO_ENC)
3391 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
9f6c9258 3392
ea2465af 3393 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
9f6c9258
DK
3394 if (xmit_type & XMIT_GSO) {
3395 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
ea2465af 3396 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
9f6c9258
DK
3397 /* Number of windows to check */
3398 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3399 int wnd_idx = 0;
3400 int frag_idx = 0;
3401 u32 wnd_sum = 0;
3402
3403 /* Headers length */
592b9b8d
YM
3404 if (xmit_type & XMIT_GSO_ENC)
3405 hlen = (int)(skb_inner_transport_header(skb) -
3406 skb->data) +
3407 inner_tcp_hdrlen(skb);
3408 else
3409 hlen = (int)(skb_transport_header(skb) -
3410 skb->data) + tcp_hdrlen(skb);
9f6c9258
DK
3411
3412 /* Amount of data (w/o headers) in the linear part of the SKB */
3413 first_bd_sz = skb_headlen(skb) - hlen;
3414
3415 wnd_sum = first_bd_sz;
3416
3417 /* Calculate the first sum - it's special */
3418 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3419 wnd_sum +=
9e903e08 3420 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3421
3422 /* If there was data on linear skb data - check it */
3423 if (first_bd_sz > 0) {
3424 if (unlikely(wnd_sum < lso_mss)) {
3425 to_copy = 1;
3426 goto exit_lbl;
3427 }
3428
3429 wnd_sum -= first_bd_sz;
3430 }
3431
3432 /* Others are easier: run through the frag list and
3433 check all windows */
3434 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3435 wnd_sum +=
9e903e08 3436 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3437
3438 if (unlikely(wnd_sum < lso_mss)) {
3439 to_copy = 1;
3440 break;
3441 }
3442 wnd_sum -=
9e903e08 3443 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3444 }
3445 } else {
3446 /* a non-LSO packet that is this fragmented must always
3447 be linearized */
3448 to_copy = 1;
3449 }
3450 }
3451
3452exit_lbl:
3453 if (unlikely(to_copy))
3454 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3455 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3456 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3457 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3458
3459 return to_copy;
3460}
3461#endif
3462
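/* Illustrative sketch - not driver code: the block above slides a window of
 * 'wnd_size' consecutive BDs over the packet (linear data minus headers
 * first, then the page fragments) and asks for linearization if any window
 * would carry less than one MSS - the FW restriction being guarded.  A plain
 * array-based analogue of that sliding-window test (bd_len[] and the helper
 * name are illustrative assumptions):
 */
#if 0	/* sketch only */
static bool needs_linearization(const u32 *bd_len, int nr_bds,
				u32 lso_mss, int wnd_size)
{
	u32 wnd_sum = 0;
	int i;

	if (nr_bds < wnd_size)
		return false;		/* few enough BDs for the FW */

	for (i = 0; i < wnd_size; i++)
		wnd_sum += bd_len[i];

	for (i = wnd_size; ; i++) {
		if (wnd_sum < lso_mss)
			return true;
		if (i == nr_bds)
			return false;
		/* slide the window forward by one BD */
		wnd_sum += bd_len[i] - bd_len[i - wnd_size];
	}
}
#endif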
f2e0899f 3463/**
e8920674 3464 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3465 *
e8920674
DK
3466 * @skb: packet skb
3467 * @pbd: parse BD
3468 * @xmit_type: xmit flags
f2e0899f 3469 */
91226790
DK
3470static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3471 struct eth_tx_parse_bd_e1x *pbd,
3472 u32 xmit_type)
f2e0899f
DK
3473{
3474 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3475 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3476 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3477
3478 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3479 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3480 pbd->tcp_pseudo_csum =
86564c3f
YM
3481 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3482 ip_hdr(skb)->daddr,
3483 0, IPPROTO_TCP, 0));
057cf65e 3484 } else {
f2e0899f 3485 pbd->tcp_pseudo_csum =
86564c3f
YM
3486 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3487 &ipv6_hdr(skb)->daddr,
3488 0, IPPROTO_TCP, 0));
057cf65e 3489 }
f2e0899f 3490
86564c3f
YM
3491 pbd->global_data |=
3492 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3493}
f85582f8 3494
a848ade4
DK
3495/**
3496 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3497 *
3498 * @bp: driver handle
3499 * @skb: packet skb
3500 * @parsing_data: data to be updated
3501 * @xmit_type: xmit flags
3502 *
3503 * 57712/578xx related, when skb has encapsulation
3504 */
3505static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3506 u32 *parsing_data, u32 xmit_type)
3507{
3508 *parsing_data |=
3509 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3510 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3511 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3512
3513 if (xmit_type & XMIT_CSUM_TCP) {
3514 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3515 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3516 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3517
3518 return skb_inner_transport_header(skb) +
3519 inner_tcp_hdrlen(skb) - skb->data;
3520 }
3521
3522 /* We support checksum offload for TCP and UDP only.
3523 * No need to pass the UDP header length - it's a constant.
3524 */
3525 return skb_inner_transport_header(skb) +
3526 sizeof(struct udphdr) - skb->data;
3527}
3528
f2e0899f 3529/**
e8920674 3530 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3531 *
e8920674
DK
3532 * @bp: driver handle
3533 * @skb: packet skb
3534 * @parsing_data: data to be updated
3535 * @xmit_type: xmit flags
f2e0899f 3536 *
91226790 3537 * 57712/578xx related
f2e0899f 3538 */
91226790
DK
3539static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3540 u32 *parsing_data, u32 xmit_type)
f2e0899f 3541{
e39aece7 3542 *parsing_data |=
2de67439 3543 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3544 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3545 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3546
e39aece7
VZ
3547 if (xmit_type & XMIT_CSUM_TCP) {
3548 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3549 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3550 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3551
e39aece7 3552 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3553 }
3554 /* We support checksum offload for TCP and UDP only.
3555 * No need to pass the UDP header length - it's a constant.
3556 */
3557 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3558}
3559
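/* Illustrative, not part of the driver: the two checksum helpers above pack
 * the L4 header start offset in 16-bit words and the TCP header length in
 * 32-bit dwords into 'parsing_data', and return the total header length in
 * bytes.  Hand-worked example for plain Ethernet + IPv4 (no options) + TCP
 * (no options); variable names below are illustrative only:
 */
#if 0	/* sketch only - hand-worked example */
	/* transport header starts at byte 14 + 20 = 34 -> 17 words */
	u32 l4_start_w = 34 >> 1;		/* 17 */
	/* TCP header is 20 bytes               ->  5 dwords        */
	u32 tcp_hdr_dw = 20 / 4;		/*  5 */
	/* returned hlen: end of TCP header measured from skb->data */
	u8  hlen       = 14 + 20 + 20;		/* 54 bytes          */
#endif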
a848ade4 3560/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3561static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3562 struct eth_tx_start_bd *tx_start_bd,
3563 u32 xmit_type)
93ef5c02 3564{
93ef5c02
DK
3565 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3566
a848ade4 3567 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3568 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3569
3570 if (!(xmit_type & XMIT_CSUM_TCP))
3571 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3572}
3573
f2e0899f 3574/**
e8920674 3575 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3576 *
e8920674
DK
3577 * @bp: driver handle
3578 * @skb: packet skb
3579 * @pbd: parse BD to be updated
3580 * @xmit_type: xmit flags
f2e0899f 3581 */
91226790
DK
3582static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3583 struct eth_tx_parse_bd_e1x *pbd,
3584 u32 xmit_type)
f2e0899f 3585{
e39aece7 3586 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3587
3588 /* for now NS flag is not used in Linux */
3589 pbd->global_data =
86564c3f
YM
3590 cpu_to_le16(hlen |
3591 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3592 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3593
3594 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3595 skb_network_header(skb)) >> 1;
f2e0899f 3596
e39aece7
VZ
3597 hlen += pbd->ip_hlen_w;
3598
3599 /* We support checksum offload for TCP and UDP only */
3600 if (xmit_type & XMIT_CSUM_TCP)
3601 hlen += tcp_hdrlen(skb) / 2;
3602 else
3603 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3604
3605 pbd->total_hlen_w = cpu_to_le16(hlen);
3606 hlen = hlen*2;
3607
3608 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3609 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3610
3611 } else {
3612 s8 fix = SKB_CS_OFF(skb); /* signed! */
3613
3614 DP(NETIF_MSG_TX_QUEUED,
3615 "hlen %d fix %d csum before fix %x\n",
3616 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3617
3618 /* HW bug: fixup the CSUM */
3619 pbd->tcp_pseudo_csum =
3620 bnx2x_csum_fix(skb_transport_header(skb),
3621 SKB_CS(skb), fix);
3622
3623 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3624 pbd->tcp_pseudo_csum);
3625 }
3626
3627 return hlen;
3628}
f85582f8 3629
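/* Illustrative, not part of the driver: unlike the E2 variant, the E1x
 * parsing BD above keeps every length in 16-bit words.  For the same
 * Ethernet + IPv4 + TCP example (all without options), the arithmetic
 * works out as follows (names below are illustrative only):
 */
#if 0	/* sketch only - hand-worked example */
	u8  mac_hlen_w   = 14 >> 1;	/* MAC header:  7 words      */
	u8  ip_hlen_w    = 20 >> 1;	/* IP header:  10 words      */
	u8  tcp_hlen_w   = 20 / 2;	/* TCP header: 10 words      */
	u16 total_hlen_w = 7 + 10 + 10;	/* 27 words                  */
	u8  ret_hlen     = 27 * 2;	/* returned in bytes: 54     */
#endif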
a848ade4
DK
3630static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3631 struct eth_tx_parse_bd_e2 *pbd_e2,
3632 struct eth_tx_parse_2nd_bd *pbd2,
3633 u16 *global_data,
3634 u32 xmit_type)
3635{
e287a75c 3636 u16 hlen_w = 0;
a848ade4 3637 u8 outerip_off, outerip_len = 0;
e768fb29 3638
e287a75c
DK
3639 /* from outer IP to transport */
3640 hlen_w = (skb_inner_transport_header(skb) -
3641 skb_network_header(skb)) >> 1;
a848ade4
DK
3642
3643 /* transport len */
e768fb29 3644 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3645
e287a75c 3646 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3647
e768fb29
DK
3648 /* outer IP header info */
3649 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3650 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3651 u32 csum = (__force u32)(~iph->check) -
3652 (__force u32)iph->tot_len -
3653 (__force u32)iph->frag_off;
c957d09f 3654
e42780b6
DK
3655 outerip_len = iph->ihl << 1;
3656
a848ade4 3657 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3658 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3659 } else {
3660 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3661 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
e42780b6 3662 pbd_e2->data.tunnel_data.flags |=
28311f8e 3663 ETH_TUNNEL_DATA_IPV6_OUTER;
a848ade4
DK
3664 }
3665
3666 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3667
3668 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3669
e42780b6
DK
3670 /* inner IP header info */
3671 if (xmit_type & XMIT_CSUM_ENC_V4) {
e287a75c 3672 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3673
3674 pbd_e2->data.tunnel_data.pseudo_csum =
3675 bswab16(~csum_tcpudp_magic(
3676 inner_ip_hdr(skb)->saddr,
3677 inner_ip_hdr(skb)->daddr,
3678 0, IPPROTO_TCP, 0));
a848ade4
DK
3679 } else {
3680 pbd_e2->data.tunnel_data.pseudo_csum =
3681 bswab16(~csum_ipv6_magic(
3682 &inner_ipv6_hdr(skb)->saddr,
3683 &inner_ipv6_hdr(skb)->daddr,
3684 0, IPPROTO_TCP, 0));
3685 }
3686
3687 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3688
3689 *global_data |=
3690 outerip_off |
a848ade4
DK
3691 (outerip_len <<
3692 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3693 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3694 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3695
3696 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3697 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3698 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3699 }
a848ade4
DK
3700}
3701
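/* Illustrative, not part of the driver: in the tunneled GSO path above the
 * FW regenerates the outer IPv4 header per segment, so the driver appears
 * to hand it the header checksum with the tot_len and frag_off
 * contributions backed out, relying on the usual incremental-checksum
 * identity (RFC 1624): subtracting a field from ~check removes it from the
 * folded sum, and csum_fold() absorbs the borrow.  Fields are taken as raw
 * 16-bit big-endian values, as in the code above:
 */
#if 0	/* sketch only */
	u32 csum = (u32)(u16)~iph->check;	/* back to the raw sum  */
	csum -= iph->tot_len;			/* drop length field    */
	csum -= iph->frag_off;			/* drop fragment field  */
	/* csum_fold() then yields the checksum "without len/frag"      */
#endif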
e42780b6
DK
3702static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3703 u32 xmit_type)
3704{
3705 struct ipv6hdr *ipv6;
3706
3707 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3708 return;
3709
3710 if (xmit_type & XMIT_GSO_ENC_V6)
3711 ipv6 = inner_ipv6_hdr(skb);
3712 else /* XMIT_GSO_V6 */
3713 ipv6 = ipv6_hdr(skb);
3714
3715 if (ipv6->nexthdr == NEXTHDR_IPV6)
3716 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3717}
3718
9f6c9258
DK
3719/* called with netif_tx_lock
3720 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3721 * netif_wake_queue()
3722 */
3723netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3724{
3725 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3726
9f6c9258 3727 struct netdev_queue *txq;
6383c0b3 3728 struct bnx2x_fp_txdata *txdata;
9f6c9258 3729 struct sw_tx_bd *tx_buf;
619c5cb6 3730 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3731 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3732 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3733 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3734 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3735 u32 pbd_e2_parsing_data = 0;
9f6c9258 3736 u16 pkt_prod, bd_prod;
65565884 3737 int nbd, txq_index;
9f6c9258
DK
3738 dma_addr_t mapping;
3739 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3740 int i;
3741 u8 hlen = 0;
3742 __le16 pkt_size = 0;
3743 struct ethhdr *eth;
3744 u8 mac_type = UNICAST_ADDRESS;
3745
3746#ifdef BNX2X_STOP_ON_ERROR
3747 if (unlikely(bp->panic))
3748 return NETDEV_TX_BUSY;
3749#endif
3750
6383c0b3
AE
3751 txq_index = skb_get_queue_mapping(skb);
3752 txq = netdev_get_tx_queue(dev, txq_index);
3753
55c11941 3754 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3755
65565884 3756 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3757
3758 /* enable this debug print to view the transmission queue being used
51c1a580 3759 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3760 txq_index, fp_index, txdata_index); */
9f6c9258 3761
16a5fd92 3762 /* enable this debug print to view the transmission details
51c1a580
MS
3763 DP(NETIF_MSG_TX_QUEUED,
3764 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3765 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3766
6383c0b3 3767 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3768 skb_shinfo(skb)->nr_frags +
3769 BDS_PER_TX_PKT +
3770 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3771 /* Handle special storage cases separately */
c96bdc0c
DK
3772 if (txdata->tx_ring_size == 0) {
3773 struct bnx2x_eth_q_stats *q_stats =
3774 bnx2x_fp_qstats(bp, txdata->parent_fp);
3775 q_stats->driver_filtered_tx_pkt++;
3776 dev_kfree_skb(skb);
3777 return NETDEV_TX_OK;
3778 }
2de67439
YM
3779 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3780 netif_tx_stop_queue(txq);
c96bdc0c 3781 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3782
9f6c9258
DK
3783 return NETDEV_TX_BUSY;
3784 }
3785
51c1a580 3786 DP(NETIF_MSG_TX_QUEUED,
04c46736 3787 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3788 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3789 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3790 skb->len);
9f6c9258
DK
3791
3792 eth = (struct ethhdr *)skb->data;
3793
3794 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3795 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3796 if (is_broadcast_ether_addr(eth->h_dest))
3797 mac_type = BROADCAST_ADDRESS;
3798 else
3799 mac_type = MULTICAST_ADDRESS;
3800 }
3801
91226790 3802#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3803 /* First, check if we need to linearize the skb (due to FW
3804 restrictions). No need to check fragmentation if page size > 8K
3805 (there will be no violation of FW restrictions) */
3806 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3807 /* Statistics of linearization */
3808 bp->lin_cnt++;
3809 if (skb_linearize(skb) != 0) {
51c1a580
MS
3810 DP(NETIF_MSG_TX_QUEUED,
3811 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3812 dev_kfree_skb_any(skb);
3813 return NETDEV_TX_OK;
3814 }
3815 }
3816#endif
619c5cb6
VZ
3817 /* Map skb linear data for DMA */
3818 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3819 skb_headlen(skb), DMA_TO_DEVICE);
3820 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3821 DP(NETIF_MSG_TX_QUEUED,
3822 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3823 dev_kfree_skb_any(skb);
3824 return NETDEV_TX_OK;
3825 }
9f6c9258
DK
3826 /*
3827 Please read carefully. First we use one BD which we mark as start,
3828 then we have a parsing info BD (used for TSO or xsum),
3829 and only then we have the rest of the TSO BDs.
3830 (don't forget to mark the last one as last,
3831 and to unmap only AFTER you write to the BD ...)
3832 And above all, all PBD sizes are in words - NOT DWORDS!
3833 */
3834
619c5cb6
VZ
3835 /* get current pkt produced now - advance it just before sending packet
3836 * since mapping of pages may fail and cause packet to be dropped
3837 */
6383c0b3
AE
3838 pkt_prod = txdata->tx_pkt_prod;
3839 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3840
619c5cb6
VZ
3841 /* get a tx_buf and first BD
3842 * tx_start_bd may be changed during SPLIT,
3843 * but first_bd will always stay first
3844 */
6383c0b3
AE
3845 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3846 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3847 first_bd = tx_start_bd;
9f6c9258
DK
3848
3849 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3850
eeed018c
MK
3851 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3852 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3853 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3854 } else if (bp->ptp_tx_skb) {
3855 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3856 } else {
3857 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3858 /* schedule check for Tx timestamp */
3859 bp->ptp_tx_skb = skb_get(skb);
3860 bp->ptp_tx_start = jiffies;
3861 schedule_work(&bp->ptp_task);
3862 }
3863 }
3864
91226790
DK
3865 /* header nbd: indirectly zero other flags! */
3866 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3867
3868 /* remember the first BD of the packet */
6383c0b3 3869 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3870 tx_buf->skb = skb;
3871 tx_buf->flags = 0;
3872
3873 DP(NETIF_MSG_TX_QUEUED,
3874 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3875 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3876
df8a39de 3877 if (skb_vlan_tag_present(skb)) {
523224a3 3878 tx_start_bd->vlan_or_ethertype =
df8a39de 3879 cpu_to_le16(skb_vlan_tag_get(skb));
523224a3
DK
3880 tx_start_bd->bd_flags.as_bitfield |=
3881 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3882 } else {
3883 /* when transmitting in a vf, start bd must hold the ethertype
3884 * for fw to enforce it
3885 */
ea36475a 3886#ifndef BNX2X_STOP_ON_ERROR
91226790 3887 if (IS_VF(bp))
ea36475a 3888#endif
dc1ba591
AE
3889 tx_start_bd->vlan_or_ethertype =
3890 cpu_to_le16(ntohs(eth->h_proto));
ea36475a 3891#ifndef BNX2X_STOP_ON_ERROR
91226790 3892 else
dc1ba591
AE
3893 /* used by FW for packet accounting */
3894 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
ea36475a 3895#endif
dc1ba591 3896 }
9f6c9258 3897
91226790
DK
3898 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3899
9f6c9258
DK
3900 /* turn on parsing and get a BD */
3901 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3902
93ef5c02
DK
3903 if (xmit_type & XMIT_CSUM)
3904 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3905
619c5cb6 3906 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3907 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3908 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3909
3910 if (xmit_type & XMIT_CSUM_ENC) {
3911 u16 global_data = 0;
3912
3913 /* Set PBD in enc checksum offload case */
3914 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3915 &pbd_e2_parsing_data,
3916 xmit_type);
3917
3918 /* turn on 2nd parsing and get a BD */
3919 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3920
3921 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3922
3923 memset(pbd2, 0, sizeof(*pbd2));
3924
3925 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3926 (skb_inner_network_header(skb) -
3927 skb->data) >> 1;
3928
3929 if (xmit_type & XMIT_GSO_ENC)
3930 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3931 &global_data,
3932 xmit_type);
3933
3934 pbd2->global_data = cpu_to_le16(global_data);
3935
3936 /* add additional parse BD indication to the start BD */
3937 SET_FLAG(tx_start_bd->general_data,
3938 ETH_TX_START_BD_PARSE_NBDS, 1);
3939 /* set encapsulation flag in start BD */
3940 SET_FLAG(tx_start_bd->general_data,
3941 ETH_TX_START_BD_TUNNEL_EXIST, 1);
fe26566d
DK
3942
3943 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3944
a848ade4
DK
3945 nbd++;
3946 } else if (xmit_type & XMIT_CSUM) {
91226790 3947 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3948 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3949 &pbd_e2_parsing_data,
3950 xmit_type);
a848ade4 3951 }
dc1ba591 3952
e42780b6 3953 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
babe723d
YM
3954 /* Add the macs to the parsing BD if this is a vf or if
3955 * Tx Switching is enabled.
3956 */
91226790
DK
3957 if (IS_VF(bp)) {
3958 /* override GRE parameters in BD */
3959 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3960 &pbd_e2->data.mac_addr.src_mid,
3961 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3962 eth->h_source);
91226790 3963
babe723d
YM
3964 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3965 &pbd_e2->data.mac_addr.dst_mid,
3966 &pbd_e2->data.mac_addr.dst_lo,
3967 eth->h_dest);
ea36475a
YM
3968 } else {
3969 if (bp->flags & TX_SWITCHING)
3970 bnx2x_set_fw_mac_addr(
3971 &pbd_e2->data.mac_addr.dst_hi,
3972 &pbd_e2->data.mac_addr.dst_mid,
3973 &pbd_e2->data.mac_addr.dst_lo,
3974 eth->h_dest);
3975#ifdef BNX2X_STOP_ON_ERROR
3976 /* Enforce-security is always set in Stop on Error mode -
3977 * the source MAC must be present in the parsing BD
3978 */
3979 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3980 &pbd_e2->data.mac_addr.src_mid,
3981 &pbd_e2->data.mac_addr.src_lo,
3982 eth->h_source);
3983#endif
619c5cb6 3984 }
96bed4b9
YM
3985
3986 SET_FLAG(pbd_e2_parsing_data,
3987 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3988 } else {
96bed4b9 3989 u16 global_data = 0;
6383c0b3 3990 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3991 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3992 /* Set PBD in checksum offload case */
3993 if (xmit_type & XMIT_CSUM)
3994 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3995
96bed4b9
YM
3996 SET_FLAG(global_data,
3997 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3998 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3999 }
4000
f85582f8 4001 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
4002 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4003 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
4004 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4005 pkt_size = tx_start_bd->nbytes;
4006
51c1a580 4007 DP(NETIF_MSG_TX_QUEUED,
91226790 4008 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 4009 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 4010 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
4011 tx_start_bd->bd_flags.as_bitfield,
4012 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
4013
4014 if (xmit_type & XMIT_GSO) {
4015
4016 DP(NETIF_MSG_TX_QUEUED,
4017 "TSO packet len %d hlen %d total len %d tso size %d\n",
4018 skb->len, hlen, skb_headlen(skb),
4019 skb_shinfo(skb)->gso_size);
4020
4021 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4022
91226790
DK
4023 if (unlikely(skb_headlen(skb) > hlen)) {
4024 nbd++;
6383c0b3
AE
4025 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4026 &tx_start_bd, hlen,
91226790
DK
4027 bd_prod);
4028 }
619c5cb6 4029 if (!CHIP_IS_E1x(bp))
e42780b6
DK
4030 pbd_e2_parsing_data |=
4031 (skb_shinfo(skb)->gso_size <<
4032 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4033 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f 4034 else
e42780b6 4035 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 4036 }
2297a2da
VZ
4037
4038 /* Set the PBD's parsing_data field if not zero
4039 * (for the chips newer than 57711).
4040 */
4041 if (pbd_e2_parsing_data)
4042 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4043
9f6c9258
DK
4044 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4045
f85582f8 4046 /* Handle fragmented skb */
9f6c9258
DK
4047 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4048 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4049
9e903e08
ED
4050 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4051 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 4052 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 4053 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 4054
51c1a580
MS
4055 DP(NETIF_MSG_TX_QUEUED,
4056 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
4057
4058 /* we need unmap all buffers already mapped
4059 * for this SKB;
4060 * first_bd->nbd need to be properly updated
4061 * before call to bnx2x_free_tx_pkt
4062 */
4063 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 4064 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
4065 TX_BD(txdata->tx_pkt_prod),
4066 &pkts_compl, &bytes_compl);
619c5cb6
VZ
4067 return NETDEV_TX_OK;
4068 }
4069
9f6c9258 4070 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 4071 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4072 if (total_pkt_bd == NULL)
6383c0b3 4073 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4074
9f6c9258
DK
4075 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4076 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
4077 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4078 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 4079 nbd++;
9f6c9258
DK
4080
4081 DP(NETIF_MSG_TX_QUEUED,
4082 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4083 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4084 le16_to_cpu(tx_data_bd->nbytes));
4085 }
4086
4087 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4088
619c5cb6
VZ
4089 /* update with actual num BDs */
4090 first_bd->nbd = cpu_to_le16(nbd);
4091
9f6c9258
DK
4092 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4093
4094 /* now send a tx doorbell, counting the next BD
4095 * if the packet contains or ends with it
4096 */
4097 if (TX_BD_POFF(bd_prod) < nbd)
4098 nbd++;
4099
619c5cb6
VZ
4100 /* total_pkt_bytes should be set on the first data BD if
4101 * it's not an LSO packet and there is more than one
4102 * data BD. In this case pkt_size is limited by an MTU value.
4103 * However we prefer to set it for an LSO packet (while we don't
4104 * have to) in order to save some CPU cycles in the non-LSO
4105 * case, where we care much more about them.
4106 */
9f6c9258
DK
4107 if (total_pkt_bd != NULL)
4108 total_pkt_bd->total_pkt_bytes = pkt_size;
4109
523224a3 4110 if (pbd_e1x)
9f6c9258 4111 DP(NETIF_MSG_TX_QUEUED,
51c1a580 4112 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
4113 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4114 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4115 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4116 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
4117 if (pbd_e2)
4118 DP(NETIF_MSG_TX_QUEUED,
4119 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
4120 pbd_e2,
4121 pbd_e2->data.mac_addr.dst_hi,
4122 pbd_e2->data.mac_addr.dst_mid,
4123 pbd_e2->data.mac_addr.dst_lo,
4124 pbd_e2->data.mac_addr.src_hi,
4125 pbd_e2->data.mac_addr.src_mid,
4126 pbd_e2->data.mac_addr.src_lo,
f2e0899f 4127 pbd_e2->parsing_data);
9f6c9258
DK
4128 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4129
2df1a70a
TH
4130 netdev_tx_sent_queue(txq, skb->len);
4131
8373c57d
WB
4132 skb_tx_timestamp(skb);
4133
6383c0b3 4134 txdata->tx_pkt_prod++;
9f6c9258
DK
4135 /*
4136 * Make sure that the BD data is updated before updating the producer
4137 * since FW might read the BD right after the producer is updated.
4138 * This is only applicable for weak-ordered memory model archs such
4139 * as IA-64. The following barrier is also mandatory since the FW
4140 * assumes packets must have BDs.
4141 */
4142 wmb();
4143
6383c0b3 4144 txdata->tx_db.data.prod += nbd;
9f6c9258 4145 barrier();
f85582f8 4146
6383c0b3 4147 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4148
4149 mmiowb();
4150
6383c0b3 4151 txdata->tx_bd_prod += nbd;
9f6c9258 4152
7df2dc6b 4153 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4154 netif_tx_stop_queue(txq);
4155
4156 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4157 * ordering of set_bit() in netif_tx_stop_queue() and read of
4158 * fp->bd_tx_cons */
4159 smp_mb();
4160
15192a8c 4161 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4162 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4163 netif_tx_wake_queue(txq);
4164 }
6383c0b3 4165 txdata->tx_pkt++;
9f6c9258
DK
4166
4167 return NETDEV_TX_OK;
4168}
f85582f8 4169
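/* Illustrative, not part of the driver: nbd bookkeeping in
 * bnx2x_start_xmit() above.  The count starts at 2 (start BD + first
 * parsing BD), grows by one for the second parsing BD in the tunneling
 * path, by one if the TSO headers are split off into their own BD, by one
 * per page fragment, and by one more if the chain wraps past a ring-page
 * boundary (the TX_BD_POFF() check).  For example, a tunneled TSO skb with
 * 3 page frags whose headers get split would post:
 */
#if 0	/* sketch only - hand-worked example */
	int nbd = 2	/* start BD + parse BD          */
		+ 1	/* second (tunnel) parse BD     */
		+ 1	/* TSO header/payload split BD  */
		+ 3;	/* one BD per page fragment     */
	/* i.e. first_bd->nbd = 7, plus possibly the next-page BD       */
#endif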
230d00eb
YM
4170void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4171{
4172 int mfw_vn = BP_FW_MB_IDX(bp);
4173 u32 tmp;
4174
4175 /* If the shmem should not affect the configuration, use an identity mapping */
4176 if (!IS_MF_BD(bp)) {
4177 int i;
4178
4179 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4180 c2s_map[i] = i;
4181 *c2s_default = 0;
4182
4183 return;
4184 }
4185
4186 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4187 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4188 c2s_map[0] = tmp & 0xff;
4189 c2s_map[1] = (tmp >> 8) & 0xff;
4190 c2s_map[2] = (tmp >> 16) & 0xff;
4191 c2s_map[3] = (tmp >> 24) & 0xff;
4192
4193 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4194 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4195 c2s_map[4] = tmp & 0xff;
4196 c2s_map[5] = (tmp >> 8) & 0xff;
4197 c2s_map[6] = (tmp >> 16) & 0xff;
4198 c2s_map[7] = (tmp >> 24) & 0xff;
4199
4200 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4201 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4202 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4203}
4204
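/* Illustrative, not part of the driver: the eight explicit byte extractions
 * above unpack two 32-bit shmem words, one PCP->COS byte per priority.  A
 * compact form of the same unpacking, where lower_word/upper_word stand for
 * the two byte-swapped SHMEM2_RD() values (names are illustrative only):
 */
#if 0	/* sketch only */
	for (i = 0; i < 4; i++) {
		c2s_map[i]     = (lower_word >> (8 * i)) & 0xff;
		c2s_map[i + 4] = (upper_word >> (8 * i)) & 0xff;
	}
#endif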
6383c0b3
AE
4205/**
4206 * bnx2x_setup_tc - routine to configure net_device for multi tc
4207 *
4208 * @netdev: net device to configure
4209 * @tc: number of traffic classes to enable
4210 *
4211 * callback connected to the ndo_setup_tc function pointer
4212 */
4213int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4214{
6383c0b3 4215 struct bnx2x *bp = netdev_priv(dev);
230d00eb
YM
4216 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4217 int cos, prio, count, offset;
6383c0b3
AE
4218
4219 /* setup tc must be called under rtnl lock */
4220 ASSERT_RTNL();
4221
16a5fd92 4222 /* no traffic classes requested. Aborting */
6383c0b3
AE
4223 if (!num_tc) {
4224 netdev_reset_tc(dev);
4225 return 0;
4226 }
4227
4228 /* requested to support too many traffic classes */
4229 if (num_tc > bp->max_cos) {
6bf07b8e 4230 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4231 num_tc, bp->max_cos);
6383c0b3
AE
4232 return -EINVAL;
4233 }
4234
4235 /* declare amount of supported traffic classes */
4236 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4237 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4238 return -EINVAL;
4239 }
4240
230d00eb
YM
4241 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4242
6383c0b3
AE
4243 /* configure priority to traffic class mapping */
4244 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
230d00eb
YM
4245 int outer_prio = c2s_map[prio];
4246
4247 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
51c1a580
MS
4248 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4249 "mapping priority %d to tc %d\n",
230d00eb 4250 outer_prio, bp->prio_to_cos[outer_prio]);
6383c0b3
AE
4251 }
4252
16a5fd92 4253 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4254 This can be used for ETS or PFC, and saves the effort of setting
4255 up a multi-class queue disc or negotiating DCBX with a switch
4256 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4257 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4258 for (prio = 1; prio < 16; prio++) {
4259 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4260 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4261 } */
4262
4263 /* configure traffic class to transmission queue mapping */
4264 for (cos = 0; cos < bp->max_cos; cos++) {
4265 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4266 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4267 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4268 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4269 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4270 cos, offset, count);
4271 }
4272
4273 return 0;
4274}
4275
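/* Illustrative, not part of the driver: the effect of bnx2x_setup_tc() is
 * one block of transmission queues per traffic class, each block
 * BNX2X_NUM_NON_CNIC_QUEUES(bp) wide.  Assuming 4 ethernet queues, no CNIC
 * and num_tc = 2, the calls above reduce to:
 */
#if 0	/* sketch only */
	netdev_set_num_tc(dev, 2);
	/* tc 0 -> txq 0..3, tc 1 -> txq 4..7 */
	netdev_set_tc_queue(dev, 0, 4, 0);
	netdev_set_tc_queue(dev, 1, 4, 4);
#endif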
a5fcf8a6
JP
4276int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
4277 __be16 proto, struct tc_to_netdev *tc)
e4c6734e 4278{
5eb4dce3 4279 if (tc->type != TC_SETUP_MQPRIO)
e4c6734e 4280 return -EINVAL;
56f36acd
AN
4281
4282 tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4283
4284 return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
e4c6734e
JF
4285}
4286
9f6c9258
DK
4287/* called with rtnl_lock */
4288int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4289{
4290 struct sockaddr *addr = p;
4291 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4292 int rc = 0;
9f6c9258 4293
2e98ffc2 4294 if (!is_valid_ether_addr(addr->sa_data)) {
51c1a580 4295 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4296 return -EINVAL;
51c1a580 4297 }
614c76df 4298
2e98ffc2
DK
4299 if (IS_MF_STORAGE_ONLY(bp)) {
4300 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
9f6c9258 4301 return -EINVAL;
51c1a580 4302 }
9f6c9258 4303
619c5cb6
VZ
4304 if (netif_running(dev)) {
4305 rc = bnx2x_set_eth_mac(bp, false);
4306 if (rc)
4307 return rc;
4308 }
4309
9f6c9258 4310 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4311
523224a3 4312 if (netif_running(dev))
619c5cb6 4313 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4314
230d00eb
YM
4315 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4316 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4317
619c5cb6 4318 return rc;
9f6c9258
DK
4319}
4320
b3b83c3f
DK
4321static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4322{
4323 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4324 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4325 u8 cos;
b3b83c3f
DK
4326
4327 /* Common */
55c11941 4328
b3b83c3f
DK
4329 if (IS_FCOE_IDX(fp_index)) {
4330 memset(sb, 0, sizeof(union host_hc_status_block));
4331 fp->status_blk_mapping = 0;
b3b83c3f 4332 } else {
b3b83c3f 4333 /* status blocks */
619c5cb6 4334 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4335 BNX2X_PCI_FREE(sb->e2_sb,
4336 bnx2x_fp(bp, fp_index,
4337 status_blk_mapping),
4338 sizeof(struct host_hc_status_block_e2));
4339 else
4340 BNX2X_PCI_FREE(sb->e1x_sb,
4341 bnx2x_fp(bp, fp_index,
4342 status_blk_mapping),
4343 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4344 }
55c11941 4345
b3b83c3f
DK
4346 /* Rx */
4347 if (!skip_rx_queue(bp, fp_index)) {
4348 bnx2x_free_rx_bds(fp);
4349
4350 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4351 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4352 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4353 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4354 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4355
4356 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4357 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4358 sizeof(struct eth_fast_path_rx_cqe) *
4359 NUM_RCQ_BD);
4360
4361 /* SGE ring */
4362 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4363 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4364 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4365 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4366 }
4367
4368 /* Tx */
4369 if (!skip_tx_queue(bp, fp_index)) {
4370 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4371 for_each_cos_in_tx_queue(fp, cos) {
65565884 4372 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4373
51c1a580 4374 DP(NETIF_MSG_IFDOWN,
94f05b0f 4375 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4376 fp_index, cos, txdata->cid);
4377
4378 BNX2X_FREE(txdata->tx_buf_ring);
4379 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4380 txdata->tx_desc_mapping,
4381 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4382 }
b3b83c3f
DK
4383 }
4384 /* end of fastpath */
4385}
4386
a8f47eb7 4387static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4388{
4389 int i;
4390 for_each_cnic_queue(bp, i)
4391 bnx2x_free_fp_mem_at(bp, i);
4392}
4393
b3b83c3f
DK
4394void bnx2x_free_fp_mem(struct bnx2x *bp)
4395{
4396 int i;
55c11941 4397 for_each_eth_queue(bp, i)
b3b83c3f
DK
4398 bnx2x_free_fp_mem_at(bp, i);
4399}
4400
1191cb83 4401static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4402{
4403 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4404 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4405 bnx2x_fp(bp, index, sb_index_values) =
4406 (__le16 *)status_blk.e2_sb->sb.index_values;
4407 bnx2x_fp(bp, index, sb_running_index) =
4408 (__le16 *)status_blk.e2_sb->sb.running_index;
4409 } else {
4410 bnx2x_fp(bp, index, sb_index_values) =
4411 (__le16 *)status_blk.e1x_sb->sb.index_values;
4412 bnx2x_fp(bp, index, sb_running_index) =
4413 (__le16 *)status_blk.e1x_sb->sb.running_index;
4414 }
4415}
4416
1191cb83
ED
4417/* Returns the number of actually allocated BDs */
4418static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4419 int rx_ring_size)
4420{
4421 struct bnx2x *bp = fp->bp;
4422 u16 ring_prod, cqe_ring_prod;
4423 int i, failure_cnt = 0;
4424
4425 fp->rx_comp_cons = 0;
4426 cqe_ring_prod = ring_prod = 0;
4427
4428 /* This routine is called only during init, so
4429 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4430 */
4431 for (i = 0; i < rx_ring_size; i++) {
996dedba 4432 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4433 failure_cnt++;
4434 continue;
4435 }
4436 ring_prod = NEXT_RX_IDX(ring_prod);
4437 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4438 WARN_ON(ring_prod <= (i - failure_cnt));
4439 }
4440
4441 if (failure_cnt)
4442 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4443 i - failure_cnt, fp->index);
4444
4445 fp->rx_bd_prod = ring_prod;
4446 /* Limit the CQE producer by the CQE ring size */
4447 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4448 cqe_ring_prod);
1191cb83 4449
15192a8c 4450 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4451
4452 return i - failure_cnt;
4453}
4454
4455static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4456{
4457 int i;
4458
4459 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4460 struct eth_rx_cqe_next_page *nextpg;
4461
4462 nextpg = (struct eth_rx_cqe_next_page *)
4463 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4464 nextpg->addr_hi =
4465 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4466 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4467 nextpg->addr_lo =
4468 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4469 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4470 }
4471}
4472
b3b83c3f
DK
4473static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4474{
4475 union host_hc_status_block *sb;
4476 struct bnx2x_fastpath *fp = &bp->fp[index];
4477 int ring_size = 0;
6383c0b3 4478 u8 cos;
c2188952 4479 int rx_ring_size = 0;
b3b83c3f 4480
2e98ffc2 4481 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
614c76df
DK
4482 rx_ring_size = MIN_RX_SIZE_NONTPA;
4483 bp->rx_ring_size = rx_ring_size;
55c11941 4484 } else if (!bp->rx_ring_size) {
c2188952
VZ
4485 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4486
065f8b92
YM
4487 if (CHIP_IS_E3(bp)) {
4488 u32 cfg = SHMEM_RD(bp,
4489 dev_info.port_hw_config[BP_PORT(bp)].
4490 default_cfg);
4491
4492 /* Decrease ring size for 1G functions */
4493 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4494 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4495 rx_ring_size /= 10;
4496 }
d760fc37 4497
c2188952
VZ
4498 /* allocate at least number of buffers required by FW */
4499 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4500 MIN_RX_SIZE_TPA, rx_ring_size);
4501
4502 bp->rx_ring_size = rx_ring_size;
614c76df 4503 } else /* if rx_ring_size specified - use it */
c2188952 4504 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4505
04c46736
YM
4506 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4507
b3b83c3f
DK
4508 /* Common */
4509 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4510
b3b83c3f 4511 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4512 /* status blocks */
cd2b0389
JP
4513 if (!CHIP_IS_E1x(bp)) {
4514 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4515 sizeof(struct host_hc_status_block_e2));
4516 if (!sb->e2_sb)
4517 goto alloc_mem_err;
4518 } else {
4519 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4520 sizeof(struct host_hc_status_block_e1x));
4521 if (!sb->e1x_sb)
4522 goto alloc_mem_err;
4523 }
b3b83c3f 4524 }
8eef2af1
DK
4525
4526 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4527 * set shortcuts for it.
4528 */
4529 if (!IS_FCOE_IDX(index))
4530 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4531
4532 /* Tx */
4533 if (!skip_tx_queue(bp, index)) {
4534 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4535 for_each_cos_in_tx_queue(fp, cos) {
65565884 4536 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4537
51c1a580
MS
4538 DP(NETIF_MSG_IFUP,
4539 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4540 index, cos);
4541
cd2b0389
JP
4542 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4543 sizeof(struct sw_tx_bd),
4544 GFP_KERNEL);
4545 if (!txdata->tx_buf_ring)
4546 goto alloc_mem_err;
4547 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4548 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4549 if (!txdata->tx_desc_ring)
4550 goto alloc_mem_err;
6383c0b3 4551 }
b3b83c3f
DK
4552 }
4553
4554 /* Rx */
4555 if (!skip_rx_queue(bp, index)) {
4556 /* fastpath rx rings: rx_buf rx_desc rx_comp */
cd2b0389
JP
4557 bnx2x_fp(bp, index, rx_buf_ring) =
4558 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4559 if (!bnx2x_fp(bp, index, rx_buf_ring))
4560 goto alloc_mem_err;
4561 bnx2x_fp(bp, index, rx_desc_ring) =
4562 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4563 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4564 if (!bnx2x_fp(bp, index, rx_desc_ring))
4565 goto alloc_mem_err;
b3b83c3f 4566
75b29459 4567 /* Seed all CQEs by 1s */
cd2b0389
JP
4568 bnx2x_fp(bp, index, rx_comp_ring) =
4569 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4570 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4571 if (!bnx2x_fp(bp, index, rx_comp_ring))
4572 goto alloc_mem_err;
b3b83c3f
DK
4573
4574 /* SGE ring */
cd2b0389
JP
4575 bnx2x_fp(bp, index, rx_page_ring) =
4576 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4577 GFP_KERNEL);
4578 if (!bnx2x_fp(bp, index, rx_page_ring))
4579 goto alloc_mem_err;
4580 bnx2x_fp(bp, index, rx_sge_ring) =
4581 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4582 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4583 if (!bnx2x_fp(bp, index, rx_sge_ring))
4584 goto alloc_mem_err;
b3b83c3f
DK
4585 /* RX BD ring */
4586 bnx2x_set_next_page_rx_bd(fp);
4587
4588 /* CQ ring */
4589 bnx2x_set_next_page_rx_cq(fp);
4590
4591 /* BDs */
4592 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4593 if (ring_size < rx_ring_size)
4594 goto alloc_mem_err;
4595 }
4596
4597 return 0;
4598
4599/* handles low memory cases */
4600alloc_mem_err:
4601 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4602 index, ring_size);
4603 /* FW will drop all packets if the queue is not big enough;
4604 * in that case we disable the queue.
6383c0b3 4605 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f 4606 */
7e6b4d44 4607 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
eb722d7a 4608 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4609 /* release memory allocated for this queue */
4610 bnx2x_free_fp_mem_at(bp, index);
4611 return -ENOMEM;
4612 }
4613 return 0;
4614}
4615
a8f47eb7 4616static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4617{
4618 if (!NO_FCOE(bp))
4619 /* FCoE */
4620 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4621 /* we will fail load process instead of mark
4622 * NO_FCOE_FLAG
4623 */
4624 return -ENOMEM;
4625
4626 return 0;
4627}
4628
a8f47eb7 4629static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
b3b83c3f
DK
4630{
4631 int i;
4632
55c11941
MS
4633 /* 1. Allocate FP for leading - fatal if error
4634 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4635 */
4636
4637 /* leading */
4638 if (bnx2x_alloc_fp_mem_at(bp, 0))
4639 return -ENOMEM;
6383c0b3 4640
b3b83c3f
DK
4641 /* RSS */
4642 for_each_nondefault_eth_queue(bp, i)
4643 if (bnx2x_alloc_fp_mem_at(bp, i))
4644 break;
4645
4646 /* handle memory failures */
4647 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4648 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4649
4650 WARN_ON(delta < 0);
4864a16a 4651 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4652 if (CNIC_SUPPORT(bp))
4653 /* move non eth FPs next to last eth FP
4654 * must be done in that order
4655 * FCOE_IDX < FWD_IDX < OOO_IDX
4656 */
b3b83c3f 4657
55c11941
MS
4658 /* move FCoE fp even if NO_FCOE_FLAG is on */
4659 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4660 bp->num_ethernet_queues -= delta;
4661 bp->num_queues = bp->num_ethernet_queues +
4662 bp->num_cnic_queues;
b3b83c3f
DK
4663 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4664 bp->num_queues + delta, bp->num_queues);
4665 }
4666
4667 return 0;
4668}
d6214d7a 4669
523224a3
DK
4670void bnx2x_free_mem_bp(struct bnx2x *bp)
4671{
c3146eb6
DK
4672 int i;
4673
4674 for (i = 0; i < bp->fp_array_size; i++)
4675 kfree(bp->fp[i].tpa_info);
523224a3 4676 kfree(bp->fp);
15192a8c
BW
4677 kfree(bp->sp_objs);
4678 kfree(bp->fp_stats);
65565884 4679 kfree(bp->bnx2x_txq);
523224a3
DK
4680 kfree(bp->msix_table);
4681 kfree(bp->ilt);
4682}
4683
0329aba1 4684int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4685{
4686 struct bnx2x_fastpath *fp;
4687 struct msix_entry *tbl;
4688 struct bnx2x_ilt *ilt;
6383c0b3 4689 int msix_table_size = 0;
55c11941 4690 int fp_array_size, txq_array_size;
15192a8c 4691 int i;
6383c0b3
AE
4692
4693 /*
4694 * The biggest MSI-X table we might need is as a maximum number of fast
2de67439 4695 * path IGU SBs plus default SB (for PF only).
6383c0b3 4696 */
1ab4434c
AE
4697 msix_table_size = bp->igu_sb_cnt;
4698 if (IS_PF(bp))
4699 msix_table_size++;
4700 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4701
6383c0b3 4702 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4703 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4704 bp->fp_array_size = fp_array_size;
4705 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4706
c3146eb6 4707 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4708 if (!fp)
4709 goto alloc_err;
c3146eb6 4710 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4711 fp[i].tpa_info =
4712 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4713 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4714 if (!(fp[i].tpa_info))
4715 goto alloc_err;
4716 }
4717
523224a3
DK
4718 bp->fp = fp;
4719
15192a8c 4720 /* allocate sp objs */
c3146eb6 4721 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4722 GFP_KERNEL);
4723 if (!bp->sp_objs)
4724 goto alloc_err;
4725
4726 /* allocate fp_stats */
c3146eb6 4727 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4728 GFP_KERNEL);
4729 if (!bp->fp_stats)
4730 goto alloc_err;
4731
65565884 4732 /* Allocate memory for the transmission queues array */
55c11941
MS
4733 txq_array_size =
4734 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4735 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4736
4737 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4738 GFP_KERNEL);
65565884
MS
4739 if (!bp->bnx2x_txq)
4740 goto alloc_err;
4741
523224a3 4742 /* msix table */
01e23742 4743 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4744 if (!tbl)
4745 goto alloc_err;
4746 bp->msix_table = tbl;
4747
4748 /* ilt */
4749 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4750 if (!ilt)
4751 goto alloc_err;
4752 bp->ilt = ilt;
4753
4754 return 0;
4755alloc_err:
4756 bnx2x_free_mem_bp(bp);
4757 return -ENOMEM;
523224a3
DK
4758}
4759
a9fccec7 4760int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4761{
4762 struct bnx2x *bp = netdev_priv(dev);
4763
4764 if (unlikely(!netif_running(dev)))
4765 return 0;
4766
5d07d868 4767 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4768 return bnx2x_nic_load(bp, LOAD_NORMAL);
4769}
4770
1ac9e428
YR
4771int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4772{
4773 u32 sel_phy_idx = 0;
4774 if (bp->link_params.num_phys <= 1)
4775 return INT_PHY;
4776
4777 if (bp->link_vars.link_up) {
4778 sel_phy_idx = EXT_PHY1;
4779 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4780 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4781 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4782 sel_phy_idx = EXT_PHY2;
4783 } else {
4784
4785 switch (bnx2x_phy_selection(&bp->link_params)) {
4786 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4787 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4788 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4789 sel_phy_idx = EXT_PHY1;
4790 break;
4791 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4792 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4793 sel_phy_idx = EXT_PHY2;
4794 break;
4795 }
4796 }
4797
4798 return sel_phy_idx;
1ac9e428
YR
4799}
4800int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4801{
4802 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4803 /*
2de67439 4804 * The selected active PHY index is always the post-swap one (in case PHY
1ac9e428
YR
4805 * swapping is enabled). So when swapping is enabled, we need to reverse
4806 * the configuration
4807 */
4808
4809 if (bp->link_params.multi_phy_config &
4810 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4811 if (sel_phy_idx == EXT_PHY1)
4812 sel_phy_idx = EXT_PHY2;
4813 else if (sel_phy_idx == EXT_PHY2)
4814 sel_phy_idx = EXT_PHY1;
4815 }
4816 return LINK_CONFIG_IDX(sel_phy_idx);
4817}
4818
55c11941 4819#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4820int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4821{
4822 struct bnx2x *bp = netdev_priv(dev);
4823 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4824
4825 switch (type) {
4826 case NETDEV_FCOE_WWNN:
4827 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4828 cp->fcoe_wwn_node_name_lo);
4829 break;
4830 case NETDEV_FCOE_WWPN:
4831 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4832 cp->fcoe_wwn_port_name_lo);
4833 break;
4834 default:
51c1a580 4835 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4836 return -EINVAL;
4837 }
4838
4839 return 0;
4840}
4841#endif
4842
9f6c9258
DK
4843/* called with rtnl_lock */
4844int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4845{
4846 struct bnx2x *bp = netdev_priv(dev);
9f6c9258 4847
0650c0b8
YM
4848 if (pci_num_vf(bp->pdev)) {
4849 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4850 return -EPERM;
4851 }
4852
9f6c9258 4853 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4854 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4855 return -EAGAIN;
4856 }
4857
9f6c9258
DK
4858 /* This does not race with packet allocation
4859 * because the actual alloc size is
4860 * only updated as part of load
4861 */
4862 dev->mtu = new_mtu;
4863
230d00eb
YM
4864 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4865 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4866
66371c44
MM
4867 return bnx2x_reload_if_running(dev);
4868}
4869
c8f44aff 4870netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4871 netdev_features_t features)
66371c44
MM
4872{
4873 struct bnx2x *bp = netdev_priv(dev);
4874
909d9faa
YM
4875 if (pci_num_vf(bp->pdev)) {
4876 netdev_features_t changed = dev->features ^ features;
4877
4878 /* Revert the requested changes in features if they
4879 * would require internal reload of PF in bnx2x_set_features().
4880 */
4881 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4882 features &= ~NETIF_F_RXCSUM;
4883 features |= dev->features & NETIF_F_RXCSUM;
4884 }
4885
4886 if (changed & NETIF_F_LOOPBACK) {
4887 features &= ~NETIF_F_LOOPBACK;
4888 features |= dev->features & NETIF_F_LOOPBACK;
4889 }
4890 }
4891
66371c44 4892 /* TPA requires Rx CSUM offloading */
aebf6244 4893 if (!(features & NETIF_F_RXCSUM)) {
66371c44 4894 features &= ~NETIF_F_LRO;
621b4d66
DK
4895 features &= ~NETIF_F_GRO;
4896 }
66371c44
MM
4897
4898 return features;
4899}
4900
c8f44aff 4901int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4902{
4903 struct bnx2x *bp = netdev_priv(dev);
f8dcb5e3 4904 netdev_features_t changes = features ^ dev->features;
538dd2e3 4905 bool bnx2x_reload = false;
f8dcb5e3 4906 int rc;
621b4d66 4907
909d9faa
YM
4908 /* VFs or non SRIOV PFs should be able to change loopback feature */
4909 if (!pci_num_vf(bp->pdev)) {
4910 if (features & NETIF_F_LOOPBACK) {
4911 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4912 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4913 bnx2x_reload = true;
4914 }
4915 } else {
4916 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4917 bp->link_params.loopback_mode = LOOPBACK_NONE;
4918 bnx2x_reload = true;
4919 }
538dd2e3
MB
4920 }
4921 }
4922
16a5fd92 4923 /* if GRO is changed while LRO is enabled, don't force a reload */
f8dcb5e3
MS
4924 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4925 changes &= ~NETIF_F_GRO;
8802f579 4926
aebf6244 4927 /* if GRO is changed while HW TPA is off, don't force a reload */
f8dcb5e3
MS
4928 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4929 changes &= ~NETIF_F_GRO;
aebf6244 4930
8802f579 4931 if (changes)
538dd2e3 4932 bnx2x_reload = true;
8802f579 4933
538dd2e3 4934 if (bnx2x_reload) {
f8dcb5e3
MS
4935 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4936 dev->features = features;
4937 rc = bnx2x_reload_if_running(dev);
4938 return rc ? rc : 1;
4939 }
66371c44 4940 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4941 }
4942
66371c44 4943 return 0;
9f6c9258
DK
4944}
4945
4946void bnx2x_tx_timeout(struct net_device *dev)
4947{
4948 struct bnx2x *bp = netdev_priv(dev);
4949
4950#ifdef BNX2X_STOP_ON_ERROR
4951 if (!bp->panic)
4952 bnx2x_panic();
4953#endif
7be08a72 4954
9f6c9258 4955 /* This allows the netif to be shutdown gracefully before resetting */
230bb0f3 4956 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
9f6c9258
DK
4957}
4958
9f6c9258
DK
4959int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4960{
4961 struct net_device *dev = pci_get_drvdata(pdev);
4962 struct bnx2x *bp;
4963
4964 if (!dev) {
4965 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4966 return -ENODEV;
4967 }
4968 bp = netdev_priv(dev);
4969
4970 rtnl_lock();
4971
4972 pci_save_state(pdev);
4973
4974 if (!netif_running(dev)) {
4975 rtnl_unlock();
4976 return 0;
4977 }
4978
4979 netif_device_detach(dev);
4980
5d07d868 4981 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4982
4983 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4984
4985 rtnl_unlock();
4986
4987 return 0;
4988}
4989
4990int bnx2x_resume(struct pci_dev *pdev)
4991{
4992 struct net_device *dev = pci_get_drvdata(pdev);
4993 struct bnx2x *bp;
4994 int rc;
4995
4996 if (!dev) {
4997 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4998 return -ENODEV;
4999 }
5000 bp = netdev_priv(dev);
5001
5002 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 5003 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
5004 return -EAGAIN;
5005 }
5006
5007 rtnl_lock();
5008
5009 pci_restore_state(pdev);
5010
5011 if (!netif_running(dev)) {
5012 rtnl_unlock();
5013 return 0;
5014 }
5015
5016 bnx2x_set_power_state(bp, PCI_D0);
5017 netif_device_attach(dev);
5018
5019 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5020
5021 rtnl_unlock();
5022
5023 return rc;
5024}
619c5cb6 5025
619c5cb6
VZ
5026void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5027 u32 cid)
5028{
b9871bcf
AE
5029 if (!cxt) {
5030 BNX2X_ERR("bad context pointer %p\n", cxt);
5031 return;
5032 }
5033
619c5cb6
VZ
5034 /* ustorm cxt validation */
5035 cxt->ustorm_ag_context.cdu_usage =
5036 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5037 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5038 /* xcontext validation */
5039 cxt->xstorm_ag_context.cdu_reserved =
5040 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5041 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5042}
5043
1191cb83
ED
5044static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5045 u8 fw_sb_id, u8 sb_index,
5046 u8 ticks)
619c5cb6 5047{
619c5cb6
VZ
5048 u32 addr = BAR_CSTRORM_INTMEM +
5049 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5050 REG_WR8(bp, addr, ticks);
51c1a580
MS
5051 DP(NETIF_MSG_IFUP,
5052 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5053 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
5054}
5055
1191cb83
ED
5056static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5057 u16 fw_sb_id, u8 sb_index,
5058 u8 disable)
619c5cb6
VZ
5059{
5060 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5061 u32 addr = BAR_CSTRORM_INTMEM +
5062 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 5063 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
5064 /* clear and set */
5065 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5066 flags |= enable_flag;
0c14e5ce 5067 REG_WR8(bp, addr, flags);
51c1a580
MS
5068 DP(NETIF_MSG_IFUP,
5069 "port %x fw_sb_id %d sb_index %d disable %d\n",
5070 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
5071}
5072
5073void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5074 u8 sb_index, u8 disable, u16 usec)
5075{
5076 int port = BP_PORT(bp);
5077 u8 ticks = usec / BNX2X_BTR;
5078
5079 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5080
5081 disable = disable ? 1 : (usec ? 0 : 1);
5082 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5083}
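/* Illustrative, not part of the driver: the coalescing timeout above is
 * programmed in BTR ticks, and passing usec == 0 forces the status-block
 * index to be disabled even if the caller asked for it to stay enabled.
 * Hand-worked example, assuming BNX2X_BTR is a 4 usec granularity (an
 * assumption here, defined elsewhere in the driver headers):
 */
#if 0	/* sketch only */
	/* bnx2x_update_coalesce_sb_index(bp, fw_sb_id, sb_index, 0, 50) */
	u8 ticks = 50 / 4;	/* usec / BNX2X_BTR -> 12 ticks           */
	/* disable = disable ? 1 : (usec ? 0 : 1) -> 0, index enabled    */
#endif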
230bb0f3
YM
5084
5085void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5086 u32 verbose)
5087{
4e857c58 5088 smp_mb__before_atomic();
230bb0f3 5089 set_bit(flag, &bp->sp_rtnl_state);
4e857c58 5090 smp_mb__after_atomic();
230bb0f3
YM
5091 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5092 flag);
5093 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5094}