]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
net: via: VIA_RHINE and VIA_VELOCITY should depend on HAS_DMA
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_cmn.c
CommitLineData
9f6c9258
DK
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
247fa82b 3 * Copyright (c) 2007-2013 Broadcom Corporation
9f6c9258
DK
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
08f6dd89 9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
9f6c9258
DK
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
f1deab50
JP
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
9f6c9258 20#include <linux/etherdevice.h>
9bcc0893 21#include <linux/if_vlan.h>
a6b7a407 22#include <linux/interrupt.h>
9f6c9258 23#include <linux/ip.h>
c9931896 24#include <linux/crash_dump.h>
9969085e 25#include <net/tcp.h>
f2e0899f 26#include <net/ipv6.h>
7f3e01fe 27#include <net/ip6_checksum.h>
076bb0c8 28#include <net/busy_poll.h>
c0cba59e 29#include <linux/prefetch.h>
9f6c9258 30#include "bnx2x_cmn.h"
523224a3 31#include "bnx2x_init.h"
042181f5 32#include "bnx2x_sp.h"
9f6c9258 33
a8f47eb7 34static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
35static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
36static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
37static int bnx2x_poll(struct napi_struct *napi, int budget);
38
39static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
40{
41 int i;
42
43 /* Add NAPI objects */
44 for_each_rx_queue_cnic(bp, i) {
45 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
46 bnx2x_poll, NAPI_POLL_WEIGHT);
47 napi_hash_add(&bnx2x_fp(bp, i, napi));
48 }
49}
50
51static void bnx2x_add_all_napi(struct bnx2x *bp)
52{
53 int i;
54
55 /* Add NAPI objects */
56 for_each_eth_queue(bp, i) {
57 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
58 bnx2x_poll, NAPI_POLL_WEIGHT);
59 napi_hash_add(&bnx2x_fp(bp, i, napi));
60 }
61}
62
63static int bnx2x_calc_num_queues(struct bnx2x *bp)
64{
7d0445d6 65 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
ff2ad307
MS
66
67 /* Reduce memory usage in kdump environment by using only one queue */
c9931896 68 if (is_kdump_kernel())
ff2ad307
MS
69 nq = 1;
70
7d0445d6
MS
71 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72 return nq;
a8f47eb7 73}
74
b3b83c3f
DK
75/**
76 * bnx2x_move_fp - move content of the fastpath structure.
77 *
78 * @bp: driver handle
79 * @from: source FP index
80 * @to: destination FP index
81 *
82 * Makes sure the contents of the bp->fp[to].napi is kept
72754080
AE
83 * intact. This is done by first copying the napi struct from
84 * the target to the source, and then mem copying the entire
65565884
MS
85 * source onto the target. Update txdata pointers and related
86 * content.
b3b83c3f
DK
87 */
88static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89{
90 struct bnx2x_fastpath *from_fp = &bp->fp[from];
91 struct bnx2x_fastpath *to_fp = &bp->fp[to];
15192a8c
BW
92 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
65565884
MS
96 int old_max_eth_txqs, new_max_eth_txqs;
97 int old_txdata_index = 0, new_txdata_index = 0;
34d5626a 98 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
72754080
AE
99
100 /* Copy the NAPI object as it has been already initialized */
101 from_fp->napi = to_fp->napi;
102
b3b83c3f
DK
103 /* Move bnx2x_fastpath contents */
104 memcpy(to_fp, from_fp, sizeof(*to_fp));
105 to_fp->index = to;
65565884 106
34d5626a
YM
107 /* Retain the tpa_info of the original `to' version as we don't want
108 * 2 FPs to contain the same tpa_info pointer.
109 */
110 to_fp->tpa_info = old_tpa_info;
111
15192a8c
BW
112 /* move sp_objs contents as well, as their indices match fp ones */
113 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114
115 /* move fp_stats contents as well, as their indices match fp ones */
116 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117
65565884
MS
118 /* Update txdata pointers in fp and move txdata content accordingly:
119 * Each fp consumes 'max_cos' txdata structures, so the index should be
120 * decremented by max_cos x delta.
121 */
122
123 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 (bp)->max_cos;
126 if (from == FCOE_IDX(bp)) {
127 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129 }
130
4864a16a
YM
131 memcpy(&bp->bnx2x_txq[new_txdata_index],
132 &bp->bnx2x_txq[old_txdata_index],
65565884
MS
133 sizeof(struct bnx2x_fp_txdata));
134 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
b3b83c3f
DK
135}
136
8ca5e17e
AE
137/**
138 * bnx2x_fill_fw_str - Fill buffer with FW version string.
139 *
140 * @bp: driver handle
141 * @buf: character buffer to fill with the fw name
142 * @buf_len: length of the above buffer
143 *
144 */
145void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146{
147 if (IS_PF(bp)) {
148 u8 phy_fw_ver[PHY_FW_VER_LEN];
149
150 phy_fw_ver[0] = '\0';
151 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152 phy_fw_ver, PHY_FW_VER_LEN);
153 strlcpy(buf, bp->fw_ver, buf_len);
154 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 "bc %d.%d.%d%s%s",
156 (bp->common.bc_ver & 0xff0000) >> 16,
157 (bp->common.bc_ver & 0xff00) >> 8,
158 (bp->common.bc_ver & 0xff),
159 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 } else {
6411280a 161 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
8ca5e17e
AE
162 }
163}
164
4864a16a
YM
165/**
166 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167 *
168 * @bp: driver handle
169 * @delta: number of eth queues which were not allocated
170 */
171static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172{
173 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174
175 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
16a5fd92 176 * backward along the array could cause memory to be overridden
4864a16a
YM
177 */
178 for (cos = 1; cos < bp->max_cos; cos++) {
179 for (i = 0; i < old_eth_num - delta; i++) {
180 struct bnx2x_fastpath *fp = &bp->fp[i];
181 int new_idx = cos * (old_eth_num - delta) + i;
182
183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184 sizeof(struct bnx2x_fp_txdata));
185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186 }
187 }
188}
189
a8f47eb7 190int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
619c5cb6 191
9f6c9258
DK
192/* free skb in the packet ring at pos idx
193 * return idx of last bd freed
194 */
6383c0b3 195static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
196 u16 idx, unsigned int *pkts_compl,
197 unsigned int *bytes_compl)
9f6c9258 198{
6383c0b3 199 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
200 struct eth_tx_start_bd *tx_start_bd;
201 struct eth_tx_bd *tx_data_bd;
202 struct sk_buff *skb = tx_buf->skb;
203 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 int nbd;
95e92fd4 205 u16 split_bd_len = 0;
9f6c9258
DK
206
207 /* prefetch skb end pointer to speedup dev_kfree_skb() */
208 prefetch(&skb->end);
209
51c1a580 210 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 211 txdata->txq_index, idx, tx_buf, skb);
9f6c9258 212
6383c0b3 213 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258
DK
214
215 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216#ifdef BNX2X_STOP_ON_ERROR
217 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218 BNX2X_ERR("BAD nbd!\n");
219 bnx2x_panic();
220 }
221#endif
222 new_cons = nbd + tx_buf->first_bd;
223
224 /* Get the next bd */
225 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227 /* Skip a parse bd... */
228 --nbd;
229 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230
fe26566d
DK
231 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232 /* Skip second parse bd... */
233 --nbd;
234 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235 }
236
95e92fd4 237 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
9f6c9258 238 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
95e92fd4
MS
239 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
9f6c9258
DK
241 --nbd;
242 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243 }
244
95e92fd4
MS
245 /* unmap first bd */
246 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248 DMA_TO_DEVICE);
249
9f6c9258
DK
250 /* now free frags */
251 while (nbd > 0) {
252
6383c0b3 253 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
254 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 if (--nbd)
257 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258 }
259
260 /* release skb */
261 WARN_ON(!skb);
d8290ae5 262 if (likely(skb)) {
2df1a70a
TH
263 (*pkts_compl)++;
264 (*bytes_compl) += skb->len;
265 }
d8290ae5 266
40955532 267 dev_kfree_skb_any(skb);
9f6c9258
DK
268 tx_buf->first_bd = 0;
269 tx_buf->skb = NULL;
270
271 return new_cons;
272}
273
6383c0b3 274int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 275{
9f6c9258 276 struct netdev_queue *txq;
6383c0b3 277 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 278 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
279
280#ifdef BNX2X_STOP_ON_ERROR
281 if (unlikely(bp->panic))
282 return -1;
283#endif
284
6383c0b3
AE
285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
288
289 while (sw_cons != hw_cons) {
290 u16 pkt_cons;
291
292 pkt_cons = TX_BD(sw_cons);
293
51c1a580
MS
294 DP(NETIF_MSG_TX_DONE,
295 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 296 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 297
2df1a70a 298 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 299 &pkts_compl, &bytes_compl);
2df1a70a 300
9f6c9258
DK
301 sw_cons++;
302 }
303
2df1a70a
TH
304 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305
6383c0b3
AE
306 txdata->tx_pkt_cons = sw_cons;
307 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
308
309 /* Need to make the tx_bd_cons update visible to start_xmit()
310 * before checking for netif_tx_queue_stopped(). Without the
311 * memory barrier, there is a small possibility that
312 * start_xmit() will miss it and cause the queue to be stopped
313 * forever.
619c5cb6
VZ
314 * On the other hand we need an rmb() here to ensure the proper
315 * ordering of bit testing in the following
316 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
317 */
318 smp_mb();
319
9f6c9258 320 if (unlikely(netif_tx_queue_stopped(txq))) {
16a5fd92 321 /* Taking tx_lock() is needed to prevent re-enabling the queue
9f6c9258
DK
322 * while it's empty. This could have happen if rx_action() gets
323 * suspended in bnx2x_tx_int() after the condition before
324 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
325 *
326 * stops the queue->sees fresh tx_bd_cons->releases the queue->
327 * sends some packets consuming the whole queue again->
328 * stops the queue
329 */
330
331 __netif_tx_lock(txq, smp_processor_id());
332
333 if ((netif_tx_queue_stopped(txq)) &&
334 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 335 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
9f6c9258
DK
336 netif_tx_wake_queue(txq);
337
338 __netif_tx_unlock(txq);
339 }
340 return 0;
341}
342
343static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
344 u16 idx)
345{
346 u16 last_max = fp->last_max_sge;
347
348 if (SUB_S16(idx, last_max) > 0)
349 fp->last_max_sge = idx;
350}
351
621b4d66
DK
352static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353 u16 sge_len,
354 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
355{
356 struct bnx2x *bp = fp->bp;
9f6c9258
DK
357 u16 last_max, last_elem, first_elem;
358 u16 delta = 0;
359 u16 i;
360
361 if (!sge_len)
362 return;
363
364 /* First mark all used pages */
365 for (i = 0; i < sge_len; i++)
619c5cb6 366 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 367 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
368
369 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 370 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
371
372 /* Here we assume that the last SGE index is the biggest */
373 prefetch((void *)(fp->sge_mask));
523224a3 374 bnx2x_update_last_max_sge(fp,
621b4d66 375 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
376
377 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
378 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
380
381 /* If ring is not full */
382 if (last_elem + 1 != first_elem)
383 last_elem++;
384
385 /* Now update the prod */
386 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387 if (likely(fp->sge_mask[i]))
388 break;
389
619c5cb6
VZ
390 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
392 }
393
394 if (delta > 0) {
395 fp->rx_sge_prod += delta;
396 /* clear page-end entries */
397 bnx2x_clear_sge_mask_next_elems(fp);
398 }
399
400 DP(NETIF_MSG_RX_STATUS,
401 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
402 fp->last_max_sge, fp->rx_sge_prod);
403}
404
2de67439 405/* Get Toeplitz hash value in the skb using the value from the
e52fcb24
ED
406 * CQE (calculated by HW).
407 */
408static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb 409 const struct eth_fast_path_rx_cqe *cqe,
5495ab75 410 enum pkt_hash_types *rxhash_type)
e52fcb24 411{
2de67439 412 /* Get Toeplitz hash from CQE */
e52fcb24 413 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
414 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
415 enum eth_rss_hash_type htype;
416
417 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
5495ab75
TH
418 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
419 (htype == TCP_IPV6_HASH_TYPE)) ?
420 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421
e52fcb24 422 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb 423 }
5495ab75 424 *rxhash_type = PKT_HASH_TYPE_NONE;
e52fcb24
ED
425 return 0;
426}
427
9f6c9258 428static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 429 u16 cons, u16 prod,
619c5cb6 430 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
431{
432 struct bnx2x *bp = fp->bp;
433 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
434 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
435 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436 dma_addr_t mapping;
619c5cb6
VZ
437 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
438 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 439
619c5cb6
VZ
440 /* print error if current state != stop */
441 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
442 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443
e52fcb24 444 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 445 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 446 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
447 fp->rx_buf_size, DMA_FROM_DEVICE);
448 /*
449 * ...if it fails - move the skb from the consumer to the producer
450 * and set the current aggregation state as ERROR to drop it
451 * when TPA_STOP arrives.
452 */
453
454 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455 /* Move the BD from the consumer to the producer */
e52fcb24 456 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
457 tpa_info->tpa_state = BNX2X_TPA_ERROR;
458 return;
459 }
9f6c9258 460
e52fcb24
ED
461 /* move empty data from pool to prod */
462 prod_rx_buf->data = first_buf->data;
619c5cb6 463 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 464 /* point prod_bd to new data */
9f6c9258
DK
465 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
466 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467
619c5cb6
VZ
468 /* move partial skb from cons to pool (don't unmap yet) */
469 *first_buf = *cons_rx_buf;
470
471 /* mark bin state as START */
472 tpa_info->parsing_flags =
473 le16_to_cpu(cqe->pars_flags.flags);
474 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
475 tpa_info->tpa_state = BNX2X_TPA_START;
476 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
477 tpa_info->placement_offset = cqe->placement_offset;
5495ab75 478 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
621b4d66
DK
479 if (fp->mode == TPA_MODE_GRO) {
480 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 481 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
621b4d66
DK
482 tpa_info->gro_size = gro_size;
483 }
619c5cb6 484
9f6c9258
DK
485#ifdef BNX2X_STOP_ON_ERROR
486 fp->tpa_queue_used |= (1 << queue);
9f6c9258 487 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
9f6c9258
DK
488 fp->tpa_queue_used);
489#endif
490}
491
e4e3c02a
VZ
492/* Timestamp option length allowed for TPA aggregation:
493 *
494 * nop nop kind length echo val
495 */
496#define TPA_TSTAMP_OPT_LEN 12
497/**
cbf1de72 498 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 499 *
cbf1de72 500 * @skb: packet skb
e8920674
DK
501 * @parsing_flags: parsing flags from the START CQE
502 * @len_on_bd: total length of the first packet for the
503 * aggregation.
cbf1de72 504 * @pkt_len: length of all segments
e8920674
DK
505 *
506 * Approximate value of the MSS for this aggregation calculated using
507 * the first packet of it.
2de67439 508 * Compute number of aggregated segments, and gso_type.
e4e3c02a 509 */
cbf1de72 510static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
ab5777d7
YM
511 u16 len_on_bd, unsigned int pkt_len,
512 u16 num_of_coalesced_segs)
e4e3c02a 513{
cbf1de72 514 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 515 * other than timestamp or IPv6 extension headers.
e4e3c02a 516 */
619c5cb6
VZ
517 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518
519 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 520 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 521 hdrs_len += sizeof(struct ipv6hdr);
cbf1de72
YM
522 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523 } else {
619c5cb6 524 hdrs_len += sizeof(struct iphdr);
cbf1de72
YM
525 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
526 }
e4e3c02a
VZ
527
528 /* Check if there was a TCP timestamp, if there is it's will
529 * always be 12 bytes length: nop nop kind length echo val.
530 *
531 * Otherwise FW would close the aggregation.
532 */
533 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
534 hdrs_len += TPA_TSTAMP_OPT_LEN;
535
cbf1de72
YM
536 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
537
538 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539 * to skb_shinfo(skb)->gso_segs
540 */
ab5777d7 541 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
e4e3c02a
VZ
542}
543
996dedba
MS
544static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
545 u16 index, gfp_t gfp_mask)
1191cb83 546{
1191cb83
ED
547 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
548 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
4cace675 549 struct bnx2x_alloc_pool *pool = &fp->page_pool;
1191cb83
ED
550 dma_addr_t mapping;
551
4cace675 552 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
1191cb83 553
4cace675
GKB
554 /* put page reference used by the memory pool, since we
555 * won't be using this page as the mempool anymore.
556 */
557 if (pool->page)
558 put_page(pool->page);
559
560 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
561 if (unlikely(!pool->page)) {
562 BNX2X_ERR("Can't alloc sge\n");
563 return -ENOMEM;
564 }
565
566 pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
567 PAGE_SIZE, DMA_FROM_DEVICE);
568 if (unlikely(dma_mapping_error(&bp->pdev->dev,
569 pool->dma))) {
570 __free_pages(pool->page, PAGES_PER_SGE_SHIFT);
571 pool->page = NULL;
572 BNX2X_ERR("Can't map sge\n");
573 return -ENOMEM;
574 }
575 pool->offset = 0;
1191cb83
ED
576 }
577
4cace675
GKB
578 get_page(pool->page);
579 sw_buf->page = pool->page;
580 sw_buf->offset = pool->offset;
581
582 mapping = pool->dma + sw_buf->offset;
1191cb83
ED
583 dma_unmap_addr_set(sw_buf, mapping, mapping);
584
585 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
586 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
587
4cace675
GKB
588 pool->offset += SGE_PAGE_SIZE;
589
1191cb83
ED
590 return 0;
591}
592
9f6c9258 593static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
594 struct bnx2x_agg_info *tpa_info,
595 u16 pages,
596 struct sk_buff *skb,
619c5cb6
VZ
597 struct eth_end_agg_rx_cqe *cqe,
598 u16 cqe_idx)
9f6c9258
DK
599{
600 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
601 u32 i, frag_len, frag_size;
602 int err, j, frag_id = 0;
619c5cb6 603 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 604 u16 full_page = 0, gro_size = 0;
9f6c9258 605
619c5cb6 606 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
607
608 if (fp->mode == TPA_MODE_GRO) {
609 gro_size = tpa_info->gro_size;
610 full_page = tpa_info->full_page;
611 }
9f6c9258
DK
612
613 /* This is needed in order to enable forwarding support */
cbf1de72
YM
614 if (frag_size)
615 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
616 le16_to_cpu(cqe->pkt_len),
617 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 618
9f6c9258 619#ifdef BNX2X_STOP_ON_ERROR
924d75ab 620 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
621 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
622 pages, cqe_idx);
619c5cb6 623 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
624 bnx2x_panic();
625 return -EINVAL;
626 }
627#endif
628
629 /* Run through the SGL and compose the fragmented skb */
630 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 631 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
632
633 /* FW gives the indices of the SGE as if the ring is an array
634 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
635 if (fp->mode == TPA_MODE_GRO)
636 frag_len = min_t(u32, frag_size, (u32)full_page);
637 else /* LRO */
924d75ab 638 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 639
9f6c9258
DK
640 rx_pg = &fp->rx_page_ring[sge_idx];
641 old_rx_pg = *rx_pg;
642
643 /* If we fail to allocate a substitute page, we simply stop
644 where we are and drop the whole packet */
996dedba 645 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
9f6c9258 646 if (unlikely(err)) {
15192a8c 647 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
648 return err;
649 }
650
4cace675
GKB
651 dma_unmap_single(&bp->pdev->dev,
652 dma_unmap_addr(&old_rx_pg, mapping),
653 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
9f6c9258 654 /* Add one frag and update the appropriate fields in the skb */
621b4d66 655 if (fp->mode == TPA_MODE_LRO)
4cace675
GKB
656 skb_fill_page_desc(skb, j, old_rx_pg.page,
657 old_rx_pg.offset, frag_len);
621b4d66
DK
658 else { /* GRO */
659 int rem;
660 int offset = 0;
661 for (rem = frag_len; rem > 0; rem -= gro_size) {
662 int len = rem > gro_size ? gro_size : rem;
663 skb_fill_page_desc(skb, frag_id++,
4cace675
GKB
664 old_rx_pg.page,
665 old_rx_pg.offset + offset,
666 len);
621b4d66
DK
667 if (offset)
668 get_page(old_rx_pg.page);
669 offset += len;
670 }
671 }
9f6c9258
DK
672
673 skb->data_len += frag_len;
924d75ab 674 skb->truesize += SGE_PAGES;
9f6c9258
DK
675 skb->len += frag_len;
676
677 frag_size -= frag_len;
678 }
679
680 return 0;
681}
682
d46d132c
ED
683static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
684{
685 if (fp->rx_frag_size)
e51423d9 686 skb_free_frag(data);
d46d132c
ED
687 else
688 kfree(data);
689}
690
996dedba 691static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
d46d132c 692{
996dedba
MS
693 if (fp->rx_frag_size) {
694 /* GFP_KERNEL allocations are used only during initialization */
695 if (unlikely(gfp_mask & __GFP_WAIT))
696 return (void *)__get_free_page(gfp_mask);
697
d46d132c 698 return netdev_alloc_frag(fp->rx_frag_size);
996dedba 699 }
d46d132c 700
996dedba 701 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
d46d132c
ED
702}
703
9969085e
YM
704#ifdef CONFIG_INET
705static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
706{
707 const struct iphdr *iph = ip_hdr(skb);
708 struct tcphdr *th;
709
710 skb_set_transport_header(skb, sizeof(struct iphdr));
711 th = tcp_hdr(skb);
712
713 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
714 iph->saddr, iph->daddr, 0);
715}
716
717static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
718{
719 struct ipv6hdr *iph = ipv6_hdr(skb);
720 struct tcphdr *th;
721
722 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
723 th = tcp_hdr(skb);
724
725 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
726 &iph->saddr, &iph->daddr, 0);
727}
2c2d06d5
YM
728
729static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
730 void (*gro_func)(struct bnx2x*, struct sk_buff*))
731{
732 skb_set_network_header(skb, 0);
733 gro_func(bp, skb);
734 tcp_gro_complete(skb);
735}
9969085e
YM
736#endif
737
738static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
739 struct sk_buff *skb)
740{
741#ifdef CONFIG_INET
cbf1de72 742 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
743 switch (be16_to_cpu(skb->protocol)) {
744 case ETH_P_IP:
2c2d06d5 745 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
9969085e
YM
746 break;
747 case ETH_P_IPV6:
2c2d06d5 748 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
9969085e
YM
749 break;
750 default:
2c2d06d5 751 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
9969085e
YM
752 be16_to_cpu(skb->protocol));
753 }
9969085e
YM
754 }
755#endif
60e66fee 756 skb_record_rx_queue(skb, fp->rx_queue);
9969085e
YM
757 napi_gro_receive(&fp->napi, skb);
758}
759
1191cb83
ED
760static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
761 struct bnx2x_agg_info *tpa_info,
762 u16 pages,
763 struct eth_end_agg_rx_cqe *cqe,
764 u16 cqe_idx)
9f6c9258 765{
619c5cb6 766 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 767 u8 pad = tpa_info->placement_offset;
619c5cb6 768 u16 len = tpa_info->len_on_bd;
e52fcb24 769 struct sk_buff *skb = NULL;
621b4d66 770 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
771 u8 old_tpa_state = tpa_info->tpa_state;
772
773 tpa_info->tpa_state = BNX2X_TPA_STOP;
774
775 /* If we there was an error during the handling of the TPA_START -
776 * drop this aggregation.
777 */
778 if (old_tpa_state == BNX2X_TPA_ERROR)
779 goto drop;
780
e52fcb24 781 /* Try to allocate the new data */
996dedba 782 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
9f6c9258
DK
783 /* Unmap skb in the pool anyway, as we are going to change
784 pool entry status to BNX2X_TPA_STOP even if new skb allocation
785 fails. */
786 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 787 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 788 if (likely(new_data))
d46d132c 789 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 790
e52fcb24 791 if (likely(skb)) {
9f6c9258 792#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 793 if (pad + len > fp->rx_buf_size) {
51c1a580 794 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 795 pad, len, fp->rx_buf_size);
9f6c9258
DK
796 bnx2x_panic();
797 return;
798 }
799#endif
800
e52fcb24 801 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 802 skb_put(skb, len);
5495ab75 803 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
9f6c9258
DK
804
805 skb->protocol = eth_type_trans(skb, bp->dev);
806 skb->ip_summed = CHECKSUM_UNNECESSARY;
807
621b4d66
DK
808 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
809 skb, cqe, cqe_idx)) {
619c5cb6 810 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 811 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 812 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 813 } else {
51c1a580
MS
814 DP(NETIF_MSG_RX_STATUS,
815 "Failed to allocate new pages - dropping packet!\n");
40955532 816 dev_kfree_skb_any(skb);
9f6c9258
DK
817 }
818
e52fcb24
ED
819 /* put new data in bin */
820 rx_buf->data = new_data;
9f6c9258 821
619c5cb6 822 return;
9f6c9258 823 }
07b0f009
ED
824 if (new_data)
825 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
826drop:
827 /* drop the packet and keep the buffer in the bin */
828 DP(NETIF_MSG_RX_STATUS,
829 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 830 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
831}
832
996dedba
MS
833static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
834 u16 index, gfp_t gfp_mask)
1191cb83
ED
835{
836 u8 *data;
837 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
838 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
839 dma_addr_t mapping;
840
996dedba 841 data = bnx2x_frag_alloc(fp, gfp_mask);
1191cb83
ED
842 if (unlikely(data == NULL))
843 return -ENOMEM;
844
845 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
846 fp->rx_buf_size,
847 DMA_FROM_DEVICE);
848 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 849 bnx2x_frag_free(fp, data);
1191cb83
ED
850 BNX2X_ERR("Can't map rx data\n");
851 return -ENOMEM;
852 }
853
854 rx_buf->data = data;
855 dma_unmap_addr_set(rx_buf, mapping, mapping);
856
857 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
858 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
859
860 return 0;
861}
862
15192a8c
BW
863static
864void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
865 struct bnx2x_fastpath *fp,
866 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 867{
e488921f
MS
868 /* Do nothing if no L4 csum validation was done.
869 * We do not check whether IP csum was validated. For IPv4 we assume
870 * that if the card got as far as validating the L4 csum, it also
871 * validated the IP csum. IPv6 has no IP csum.
872 */
d6cb3e41 873 if (cqe->fast_path_cqe.status_flags &
e488921f 874 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
875 return;
876
e488921f 877 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
878
879 if (cqe->fast_path_cqe.type_error_flags &
880 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
881 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 882 qstats->hw_csum_err++;
d6cb3e41
ED
883 else
884 skb->ip_summed = CHECKSUM_UNNECESSARY;
885}
9f6c9258 886
a8f47eb7 887static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
9f6c9258
DK
888{
889 struct bnx2x *bp = fp->bp;
890 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
75b29459 891 u16 sw_comp_cons, sw_comp_prod;
9f6c9258 892 int rx_pkt = 0;
75b29459
DK
893 union eth_rx_cqe *cqe;
894 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258
DK
895
896#ifdef BNX2X_STOP_ON_ERROR
897 if (unlikely(bp->panic))
898 return 0;
899#endif
b3529744
EB
900 if (budget <= 0)
901 return rx_pkt;
9f6c9258 902
9f6c9258
DK
903 bd_cons = fp->rx_bd_cons;
904 bd_prod = fp->rx_bd_prod;
905 bd_prod_fw = bd_prod;
906 sw_comp_cons = fp->rx_comp_cons;
907 sw_comp_prod = fp->rx_comp_prod;
908
75b29459
DK
909 comp_ring_cons = RCQ_BD(sw_comp_cons);
910 cqe = &fp->rx_comp_ring[comp_ring_cons];
911 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
912
913 DP(NETIF_MSG_RX_STATUS,
75b29459 914 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
9f6c9258 915
75b29459 916 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
9f6c9258
DK
917 struct sw_rx_bd *rx_buf = NULL;
918 struct sk_buff *skb;
9f6c9258 919 u8 cqe_fp_flags;
619c5cb6 920 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 921 u16 len, pad, queue;
e52fcb24 922 u8 *data;
bd5cef03 923 u32 rxhash;
5495ab75 924 enum pkt_hash_types rxhash_type;
9f6c9258 925
619c5cb6
VZ
926#ifdef BNX2X_STOP_ON_ERROR
927 if (unlikely(bp->panic))
928 return 0;
929#endif
930
9f6c9258
DK
931 bd_prod = RX_BD(bd_prod);
932 bd_cons = RX_BD(bd_cons);
933
9aaae044 934 /* A rmb() is required to ensure that the CQE is not read
935 * before it is written by the adapter DMA. PCI ordering
936 * rules will make sure the other fields are written before
937 * the marker at the end of struct eth_fast_path_rx_cqe
938 * but without rmb() a weakly ordered processor can process
939 * stale data. Without the barrier TPA state-machine might
940 * enter inconsistent state and kernel stack might be
941 * provided with incorrect packet description - these lead
942 * to various kernel crashed.
943 */
944 rmb();
945
619c5cb6
VZ
946 cqe_fp_flags = cqe_fp->type_error_flags;
947 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 948
51c1a580
MS
949 DP(NETIF_MSG_RX_STATUS,
950 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
951 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
952 cqe_fp_flags, cqe_fp->status_flags,
953 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
954 le16_to_cpu(cqe_fp->vlan_tag),
955 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
956
957 /* is this a slowpath msg? */
619c5cb6 958 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
959 bnx2x_sp_event(fp, cqe);
960 goto next_cqe;
e52fcb24 961 }
621b4d66 962
e52fcb24
ED
963 rx_buf = &fp->rx_buf_ring[bd_cons];
964 data = rx_buf->data;
9f6c9258 965
e52fcb24 966 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
967 struct bnx2x_agg_info *tpa_info;
968 u16 frag_size, pages;
619c5cb6 969#ifdef BNX2X_STOP_ON_ERROR
e52fcb24 970 /* sanity check */
7e6b4d44 971 if (fp->mode == TPA_MODE_DISABLED &&
e52fcb24
ED
972 (CQE_TYPE_START(cqe_fp_type) ||
973 CQE_TYPE_STOP(cqe_fp_type)))
7e6b4d44 974 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
e52fcb24 975 CQE_TYPE(cqe_fp_type));
619c5cb6 976#endif
9f6c9258 977
e52fcb24
ED
978 if (CQE_TYPE_START(cqe_fp_type)) {
979 u16 queue = cqe_fp->queue_index;
980 DP(NETIF_MSG_RX_STATUS,
981 "calling tpa_start on queue %d\n",
982 queue);
9f6c9258 983
e52fcb24
ED
984 bnx2x_tpa_start(fp, queue,
985 bd_cons, bd_prod,
986 cqe_fp);
621b4d66 987
e52fcb24 988 goto next_rx;
621b4d66
DK
989 }
990 queue = cqe->end_agg_cqe.queue_index;
991 tpa_info = &fp->tpa_info[queue];
992 DP(NETIF_MSG_RX_STATUS,
993 "calling tpa_stop on queue %d\n",
994 queue);
995
996 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
997 tpa_info->len_on_bd;
998
999 if (fp->mode == TPA_MODE_GRO)
1000 pages = (frag_size + tpa_info->full_page - 1) /
1001 tpa_info->full_page;
1002 else
1003 pages = SGE_PAGE_ALIGN(frag_size) >>
1004 SGE_PAGE_SHIFT;
1005
1006 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1007 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 1008#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
1009 if (bp->panic)
1010 return 0;
9f6c9258
DK
1011#endif
1012
621b4d66
DK
1013 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1014 goto next_cqe;
e52fcb24
ED
1015 }
1016 /* non TPA */
621b4d66 1017 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
1018 pad = cqe_fp->placement_offset;
1019 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 1020 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
1021 pad + RX_COPY_THRESH,
1022 DMA_FROM_DEVICE);
1023 pad += NET_SKB_PAD;
1024 prefetch(data + pad); /* speedup eth_type_trans() */
1025 /* is this an error packet? */
1026 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 1027 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
1028 "ERROR flags %x rx packet %u\n",
1029 cqe_fp_flags, sw_comp_cons);
15192a8c 1030 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
1031 goto reuse_rx;
1032 }
9f6c9258 1033
e52fcb24
ED
1034 /* Since we don't have a jumbo ring
1035 * copy small packets if mtu > 1500
1036 */
1037 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1038 (len <= RX_COPY_THRESH)) {
45abfb10 1039 skb = napi_alloc_skb(&fp->napi, len);
e52fcb24 1040 if (skb == NULL) {
51c1a580 1041 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 1042 "ERROR packet dropped because of alloc failure\n");
15192a8c 1043 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
1044 goto reuse_rx;
1045 }
e52fcb24
ED
1046 memcpy(skb->data, data + pad, len);
1047 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1048 } else {
996dedba
MS
1049 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1050 GFP_ATOMIC) == 0)) {
9f6c9258 1051 dma_unmap_single(&bp->pdev->dev,
e52fcb24 1052 dma_unmap_addr(rx_buf, mapping),
a8c94b91 1053 fp->rx_buf_size,
9f6c9258 1054 DMA_FROM_DEVICE);
d46d132c 1055 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 1056 if (unlikely(!skb)) {
d46d132c 1057 bnx2x_frag_free(fp, data);
15192a8c
BW
1058 bnx2x_fp_qstats(bp, fp)->
1059 rx_skb_alloc_failed++;
e52fcb24
ED
1060 goto next_rx;
1061 }
9f6c9258 1062 skb_reserve(skb, pad);
9f6c9258 1063 } else {
51c1a580
MS
1064 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1065 "ERROR packet dropped because of alloc failure\n");
15192a8c 1066 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 1067reuse_rx:
e52fcb24 1068 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
1069 goto next_rx;
1070 }
036d2df9 1071 }
9f6c9258 1072
036d2df9
DK
1073 skb_put(skb, len);
1074 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 1075
036d2df9 1076 /* Set Toeplitz hash for a none-LRO skb */
5495ab75
TH
1077 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1078 skb_set_hash(skb, rxhash, rxhash_type);
9f6c9258 1079
036d2df9 1080 skb_checksum_none_assert(skb);
f85582f8 1081
d6cb3e41 1082 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
1083 bnx2x_csum_validate(skb, cqe, fp,
1084 bnx2x_fp_qstats(bp, fp));
9f6c9258 1085
f233cafe 1086 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 1087
eeed018c 1088 /* Check if this packet was timestamped */
56daf66d 1089 if (unlikely(cqe->fast_path_cqe.type_error_flags &
eeed018c
MK
1090 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1091 bnx2x_set_rx_ts(bp, skb);
1092
619c5cb6
VZ
1093 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1094 PARSING_FLAGS_VLAN)
86a9bad3 1095 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 1096 le16_to_cpu(cqe_fp->vlan_tag));
9f6c9258 1097
8b80cda5 1098 skb_mark_napi_id(skb, &fp->napi);
8f20aa57
DK
1099
1100 if (bnx2x_fp_ll_polling(fp))
1101 netif_receive_skb(skb);
1102 else
1103 napi_gro_receive(&fp->napi, skb);
9f6c9258 1104next_rx:
e52fcb24 1105 rx_buf->data = NULL;
9f6c9258
DK
1106
1107 bd_cons = NEXT_RX_IDX(bd_cons);
1108 bd_prod = NEXT_RX_IDX(bd_prod);
1109 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1110 rx_pkt++;
1111next_cqe:
1112 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1113 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1114
75b29459
DK
1115 /* mark CQE as free */
1116 BNX2X_SEED_CQE(cqe_fp);
1117
9f6c9258
DK
1118 if (rx_pkt == budget)
1119 break;
75b29459
DK
1120
1121 comp_ring_cons = RCQ_BD(sw_comp_cons);
1122 cqe = &fp->rx_comp_ring[comp_ring_cons];
1123 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
1124 } /* while */
1125
1126 fp->rx_bd_cons = bd_cons;
1127 fp->rx_bd_prod = bd_prod_fw;
1128 fp->rx_comp_cons = sw_comp_cons;
1129 fp->rx_comp_prod = sw_comp_prod;
1130
1131 /* Update producers */
1132 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1133 fp->rx_sge_prod);
1134
1135 fp->rx_pkt += rx_pkt;
1136 fp->rx_calls++;
1137
1138 return rx_pkt;
1139}
1140
1141static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1142{
1143 struct bnx2x_fastpath *fp = fp_cookie;
1144 struct bnx2x *bp = fp->bp;
6383c0b3 1145 u8 cos;
9f6c9258 1146
51c1a580
MS
1147 DP(NETIF_MSG_INTR,
1148 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1149 fp->index, fp->fw_sb_id, fp->igu_sb_id);
ecf01c22 1150
523224a3 1151 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1152
1153#ifdef BNX2X_STOP_ON_ERROR
1154 if (unlikely(bp->panic))
1155 return IRQ_HANDLED;
1156#endif
1157
1158 /* Handle Rx and Tx according to MSI-X vector */
6383c0b3 1159 for_each_cos_in_tx_queue(fp, cos)
65565884 1160 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1161
523224a3 1162 prefetch(&fp->sb_running_index[SM_RX_ID]);
f5fbf115 1163 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
9f6c9258
DK
1164
1165 return IRQ_HANDLED;
1166}
1167
9f6c9258
DK
1168/* HW Lock for shared dual port PHYs */
1169void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1170{
1171 mutex_lock(&bp->port.phy_mutex);
1172
8203c4b6 1173 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1174}
1175
1176void bnx2x_release_phy_lock(struct bnx2x *bp)
1177{
8203c4b6 1178 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1179
1180 mutex_unlock(&bp->port.phy_mutex);
1181}
1182
0793f83f
DK
1183/* calculates MF speed according to current linespeed and MF configuration */
1184u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1185{
1186 u16 line_speed = bp->link_vars.line_speed;
1187 if (IS_MF(bp)) {
faa6fcbb
DK
1188 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1189 bp->mf_config[BP_VN(bp)]);
1190
1191 /* Calculate the current MAX line speed limit for the MF
1192 * devices
0793f83f 1193 */
faa6fcbb
DK
1194 if (IS_MF_SI(bp))
1195 line_speed = (line_speed * maxCfg) / 100;
1196 else { /* SD mode */
0793f83f
DK
1197 u16 vn_max_rate = maxCfg * 100;
1198
1199 if (vn_max_rate < line_speed)
1200 line_speed = vn_max_rate;
faa6fcbb 1201 }
0793f83f
DK
1202 }
1203
1204 return line_speed;
1205}
1206
2ae17f66
VZ
1207/**
1208 * bnx2x_fill_report_data - fill link report data to report
1209 *
1210 * @bp: driver handle
1211 * @data: link state to update
1212 *
1213 * It uses a none-atomic bit operations because is called under the mutex.
1214 */
1191cb83
ED
1215static void bnx2x_fill_report_data(struct bnx2x *bp,
1216 struct bnx2x_link_report_data *data)
2ae17f66 1217{
2ae17f66
VZ
1218 memset(data, 0, sizeof(*data));
1219
6495d15a
DK
1220 if (IS_PF(bp)) {
1221 /* Fill the report data: effective line speed */
1222 data->line_speed = bnx2x_get_mf_speed(bp);
1223
1224 /* Link is down */
1225 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1226 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1227 &data->link_report_flags);
1228
1229 if (!BNX2X_NUM_ETH_QUEUES(bp))
1230 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1231 &data->link_report_flags);
1232
1233 /* Full DUPLEX */
1234 if (bp->link_vars.duplex == DUPLEX_FULL)
1235 __set_bit(BNX2X_LINK_REPORT_FD,
1236 &data->link_report_flags);
1237
1238 /* Rx Flow Control is ON */
1239 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1240 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1241 &data->link_report_flags);
1242
1243 /* Tx Flow Control is ON */
1244 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1245 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1246 &data->link_report_flags);
1247 } else { /* VF */
1248 *data = bp->vf_link_vars;
1249 }
2ae17f66
VZ
1250}
1251
1252/**
1253 * bnx2x_link_report - report link status to OS.
1254 *
1255 * @bp: driver handle
1256 *
1257 * Calls the __bnx2x_link_report() under the same locking scheme
1258 * as a link/PHY state managing code to ensure a consistent link
1259 * reporting.
1260 */
1261
9f6c9258
DK
1262void bnx2x_link_report(struct bnx2x *bp)
1263{
2ae17f66
VZ
1264 bnx2x_acquire_phy_lock(bp);
1265 __bnx2x_link_report(bp);
1266 bnx2x_release_phy_lock(bp);
1267}
9f6c9258 1268
2ae17f66
VZ
1269/**
1270 * __bnx2x_link_report - report link status to OS.
1271 *
1272 * @bp: driver handle
1273 *
16a5fd92 1274 * None atomic implementation.
2ae17f66
VZ
1275 * Should be called under the phy_lock.
1276 */
1277void __bnx2x_link_report(struct bnx2x *bp)
1278{
1279 struct bnx2x_link_report_data cur_data;
9f6c9258 1280
2ae17f66 1281 /* reread mf_cfg */
ad5afc89 1282 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1283 bnx2x_read_mf_cfg(bp);
1284
1285 /* Read the current link report info */
1286 bnx2x_fill_report_data(bp, &cur_data);
1287
1288 /* Don't report link down or exactly the same link status twice */
1289 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1290 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1291 &bp->last_reported_link.link_report_flags) &&
1292 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1293 &cur_data.link_report_flags)))
1294 return;
1295
1296 bp->link_cnt++;
9f6c9258 1297
2ae17f66
VZ
1298 /* We are going to report a new link parameters now -
1299 * remember the current data for the next time.
1300 */
1301 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1302
6495d15a
DK
1303 /* propagate status to VFs */
1304 if (IS_PF(bp))
1305 bnx2x_iov_link_update(bp);
1306
2ae17f66
VZ
1307 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1308 &cur_data.link_report_flags)) {
1309 netif_carrier_off(bp->dev);
1310 netdev_err(bp->dev, "NIC Link is Down\n");
1311 return;
1312 } else {
94f05b0f
JP
1313 const char *duplex;
1314 const char *flow;
1315
2ae17f66 1316 netif_carrier_on(bp->dev);
9f6c9258 1317
2ae17f66
VZ
1318 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1319 &cur_data.link_report_flags))
94f05b0f 1320 duplex = "full";
9f6c9258 1321 else
94f05b0f 1322 duplex = "half";
9f6c9258 1323
2ae17f66
VZ
1324 /* Handle the FC at the end so that only these flags would be
1325 * possibly set. This way we may easily check if there is no FC
1326 * enabled.
1327 */
1328 if (cur_data.link_report_flags) {
1329 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1330 &cur_data.link_report_flags)) {
2ae17f66
VZ
1331 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1332 &cur_data.link_report_flags))
94f05b0f
JP
1333 flow = "ON - receive & transmit";
1334 else
1335 flow = "ON - receive";
9f6c9258 1336 } else {
94f05b0f 1337 flow = "ON - transmit";
9f6c9258 1338 }
94f05b0f
JP
1339 } else {
1340 flow = "none";
9f6c9258 1341 }
94f05b0f
JP
1342 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1343 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1344 }
1345}
1346
1191cb83
ED
1347static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1348{
1349 int i;
1350
1351 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1352 struct eth_rx_sge *sge;
1353
1354 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1355 sge->addr_hi =
1356 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1357 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1358
1359 sge->addr_lo =
1360 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1361 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1362 }
1363}
1364
1365static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1366 struct bnx2x_fastpath *fp, int last)
1367{
1368 int i;
1369
1370 for (i = 0; i < last; i++) {
1371 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1372 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1373 u8 *data = first_buf->data;
1374
1375 if (data == NULL) {
1376 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1377 continue;
1378 }
1379 if (tpa_info->tpa_state == BNX2X_TPA_START)
1380 dma_unmap_single(&bp->pdev->dev,
1381 dma_unmap_addr(first_buf, mapping),
1382 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1383 bnx2x_frag_free(fp, data);
1191cb83
ED
1384 first_buf->data = NULL;
1385 }
1386}
1387
55c11941
MS
1388void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1389{
1390 int j;
1391
1392 for_each_rx_queue_cnic(bp, j) {
1393 struct bnx2x_fastpath *fp = &bp->fp[j];
1394
1395 fp->rx_bd_cons = 0;
1396
1397 /* Activate BD ring */
1398 /* Warning!
1399 * this will generate an interrupt (to the TSTORM)
1400 * must only be done after chip is initialized
1401 */
1402 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1403 fp->rx_sge_prod);
1404 }
1405}
1406
9f6c9258
DK
1407void bnx2x_init_rx_rings(struct bnx2x *bp)
1408{
1409 int func = BP_FUNC(bp);
523224a3 1410 u16 ring_prod;
9f6c9258 1411 int i, j;
25141580 1412
b3b83c3f 1413 /* Allocate TPA resources */
55c11941 1414 for_each_eth_queue(bp, j) {
523224a3 1415 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1416
a8c94b91
VZ
1417 DP(NETIF_MSG_IFUP,
1418 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1419
7e6b4d44 1420 if (fp->mode != TPA_MODE_DISABLED) {
16a5fd92 1421 /* Fill the per-aggregation pool */
dfacf138 1422 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1423 struct bnx2x_agg_info *tpa_info =
1424 &fp->tpa_info[i];
1425 struct sw_rx_bd *first_buf =
1426 &tpa_info->first_buf;
1427
996dedba
MS
1428 first_buf->data =
1429 bnx2x_frag_alloc(fp, GFP_KERNEL);
e52fcb24 1430 if (!first_buf->data) {
51c1a580
MS
1431 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1432 j);
9f6c9258 1433 bnx2x_free_tpa_pool(bp, fp, i);
7e6b4d44 1434 fp->mode = TPA_MODE_DISABLED;
9f6c9258
DK
1435 break;
1436 }
619c5cb6
VZ
1437 dma_unmap_addr_set(first_buf, mapping, 0);
1438 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1439 }
523224a3
DK
1440
1441 /* "next page" elements initialization */
1442 bnx2x_set_next_page_sgl(fp);
1443
1444 /* set SGEs bit mask */
1445 bnx2x_init_sge_ring_bit_mask(fp);
1446
1447 /* Allocate SGEs and initialize the ring elements */
1448 for (i = 0, ring_prod = 0;
1449 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1450
996dedba
MS
1451 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1452 GFP_KERNEL) < 0) {
51c1a580
MS
1453 BNX2X_ERR("was only able to allocate %d rx sges\n",
1454 i);
1455 BNX2X_ERR("disabling TPA for queue[%d]\n",
1456 j);
523224a3 1457 /* Cleanup already allocated elements */
619c5cb6
VZ
1458 bnx2x_free_rx_sge_range(bp, fp,
1459 ring_prod);
1460 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1461 MAX_AGG_QS(bp));
7e6b4d44 1462 fp->mode = TPA_MODE_DISABLED;
523224a3
DK
1463 ring_prod = 0;
1464 break;
1465 }
1466 ring_prod = NEXT_SGE_IDX(ring_prod);
1467 }
1468
1469 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1470 }
1471 }
1472
55c11941 1473 for_each_eth_queue(bp, j) {
9f6c9258
DK
1474 struct bnx2x_fastpath *fp = &bp->fp[j];
1475
1476 fp->rx_bd_cons = 0;
9f6c9258 1477
b3b83c3f
DK
1478 /* Activate BD ring */
1479 /* Warning!
1480 * this will generate an interrupt (to the TSTORM)
1481 * must only be done after chip is initialized
1482 */
1483 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1484 fp->rx_sge_prod);
9f6c9258 1485
9f6c9258
DK
1486 if (j != 0)
1487 continue;
1488
619c5cb6 1489 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1490 REG_WR(bp, BAR_USTRORM_INTMEM +
1491 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1492 U64_LO(fp->rx_comp_mapping));
1493 REG_WR(bp, BAR_USTRORM_INTMEM +
1494 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1495 U64_HI(fp->rx_comp_mapping));
1496 }
9f6c9258
DK
1497 }
1498}
f85582f8 1499
55c11941 1500static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1501{
6383c0b3 1502 u8 cos;
55c11941 1503 struct bnx2x *bp = fp->bp;
9f6c9258 1504
55c11941
MS
1505 for_each_cos_in_tx_queue(fp, cos) {
1506 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1507 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1508
55c11941
MS
1509 u16 sw_prod = txdata->tx_pkt_prod;
1510 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1511
55c11941
MS
1512 while (sw_cons != sw_prod) {
1513 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1514 &pkts_compl, &bytes_compl);
1515 sw_cons++;
9f6c9258 1516 }
55c11941
MS
1517
1518 netdev_tx_reset_queue(
1519 netdev_get_tx_queue(bp->dev,
1520 txdata->txq_index));
1521 }
1522}
1523
1524static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1525{
1526 int i;
1527
1528 for_each_tx_queue_cnic(bp, i) {
1529 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1530 }
1531}
1532
1533static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1534{
1535 int i;
1536
1537 for_each_eth_queue(bp, i) {
1538 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1539 }
1540}
1541
b3b83c3f
DK
1542static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1543{
1544 struct bnx2x *bp = fp->bp;
1545 int i;
1546
1547 /* ring wasn't allocated */
1548 if (fp->rx_buf_ring == NULL)
1549 return;
1550
1551 for (i = 0; i < NUM_RX_BD; i++) {
1552 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1553 u8 *data = rx_buf->data;
b3b83c3f 1554
e52fcb24 1555 if (data == NULL)
b3b83c3f 1556 continue;
b3b83c3f
DK
1557 dma_unmap_single(&bp->pdev->dev,
1558 dma_unmap_addr(rx_buf, mapping),
1559 fp->rx_buf_size, DMA_FROM_DEVICE);
1560
e52fcb24 1561 rx_buf->data = NULL;
d46d132c 1562 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1563 }
1564}
1565
55c11941
MS
1566static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1567{
1568 int j;
1569
1570 for_each_rx_queue_cnic(bp, j) {
1571 bnx2x_free_rx_bds(&bp->fp[j]);
1572 }
1573}
1574
9f6c9258
DK
1575static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1576{
b3b83c3f 1577 int j;
9f6c9258 1578
55c11941 1579 for_each_eth_queue(bp, j) {
9f6c9258
DK
1580 struct bnx2x_fastpath *fp = &bp->fp[j];
1581
b3b83c3f 1582 bnx2x_free_rx_bds(fp);
9f6c9258 1583
7e6b4d44 1584 if (fp->mode != TPA_MODE_DISABLED)
dfacf138 1585 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1586 }
1587}
1588
a8f47eb7 1589static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
55c11941
MS
1590{
1591 bnx2x_free_tx_skbs_cnic(bp);
1592 bnx2x_free_rx_skbs_cnic(bp);
1593}
1594
9f6c9258
DK
1595void bnx2x_free_skbs(struct bnx2x *bp)
1596{
1597 bnx2x_free_tx_skbs(bp);
1598 bnx2x_free_rx_skbs(bp);
1599}
1600
e3835b99
DK
1601void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1602{
1603 /* load old values */
1604 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1605
1606 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1607 /* leave all but MAX value */
1608 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1609
1610 /* set new MAX value */
1611 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1612 & FUNC_MF_CFG_MAX_BW_MASK;
1613
1614 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1615 }
1616}
1617
ca92429f
DK
1618/**
1619 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1620 *
1621 * @bp: driver handle
1622 * @nvecs: number of vectors to be released
1623 */
1624static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1625{
ca92429f 1626 int i, offset = 0;
9f6c9258 1627
ca92429f
DK
1628 if (nvecs == offset)
1629 return;
ad5afc89
AE
1630
1631 /* VFs don't have a default SB */
1632 if (IS_PF(bp)) {
1633 free_irq(bp->msix_table[offset].vector, bp->dev);
1634 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1635 bp->msix_table[offset].vector);
1636 offset++;
1637 }
55c11941
MS
1638
1639 if (CNIC_SUPPORT(bp)) {
1640 if (nvecs == offset)
1641 return;
1642 offset++;
1643 }
ca92429f 1644
ec6ba945 1645 for_each_eth_queue(bp, i) {
ca92429f
DK
1646 if (nvecs == offset)
1647 return;
51c1a580
MS
1648 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1649 i, bp->msix_table[offset].vector);
9f6c9258 1650
ca92429f 1651 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1652 }
1653}
1654
d6214d7a 1655void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1656{
30a5de77 1657 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1658 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1659 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1660
1661 /* vfs don't have a default status block */
1662 if (IS_PF(bp))
1663 nvecs++;
1664
1665 bnx2x_free_msix_irqs(bp, nvecs);
1666 } else {
30a5de77 1667 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1668 }
9f6c9258
DK
1669}
1670
0e8d2ec5 1671int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1672{
1ab4434c 1673 int msix_vec = 0, i, rc;
9f6c9258 1674
1ab4434c
AE
1675 /* VFs don't have a default status block */
1676 if (IS_PF(bp)) {
1677 bp->msix_table[msix_vec].entry = msix_vec;
1678 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1679 bp->msix_table[0].entry);
1680 msix_vec++;
1681 }
9f6c9258 1682
55c11941
MS
1683 /* Cnic requires an msix vector for itself */
1684 if (CNIC_SUPPORT(bp)) {
1685 bp->msix_table[msix_vec].entry = msix_vec;
1686 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1687 msix_vec, bp->msix_table[msix_vec].entry);
1688 msix_vec++;
1689 }
1690
6383c0b3 1691 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1692 for_each_eth_queue(bp, i) {
d6214d7a 1693 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1694 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1695 msix_vec, msix_vec, i);
d6214d7a 1696 msix_vec++;
9f6c9258
DK
1697 }
1698
1ab4434c
AE
1699 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1700 msix_vec);
d6214d7a 1701
a5444b17
AG
1702 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1703 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
9f6c9258
DK
1704 /*
1705 * reconfigure number of tx/rx queues according to available
1706 * MSI-X vectors
1707 */
a5444b17 1708 if (rc == -ENOSPC) {
30a5de77 1709 /* Get by with single vector */
a5444b17
AG
1710 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1711 if (rc < 0) {
30a5de77
DK
1712 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1713 rc);
1714 goto no_msix;
1715 }
1716
1717 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1718 bp->flags |= USING_SINGLE_MSIX_FLAG;
1719
55c11941
MS
1720 BNX2X_DEV_INFO("set number of queues to 1\n");
1721 bp->num_ethernet_queues = 1;
1722 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1723 } else if (rc < 0) {
a5444b17 1724 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1725 goto no_msix;
a5444b17
AG
1726 } else if (rc < msix_vec) {
 1727 /* how many fewer vectors will we have? */
1728 int diff = msix_vec - rc;
1729
1730 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1731
1732 /*
1733 * decrease number of queues by number of unallocated entries
1734 */
1735 bp->num_ethernet_queues -= diff;
1736 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1737
1738 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1739 bp->num_queues);
9f6c9258
DK
1740 }
1741
1742 bp->flags |= USING_MSIX_FLAG;
1743
1744 return 0;
30a5de77
DK
1745
1746no_msix:
1747 /* fall to INTx if not enough memory */
1748 if (rc == -ENOMEM)
1749 bp->flags |= DISABLE_MSI_FLAG;
1750
1751 return rc;
9f6c9258
DK
1752}
1753
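/* Worked example (illustrative only, queue counts assumed): a PF with CNIC
 * support and 8 ETH queues asks for msix_vec = 1 + 1 + 8 = 10 vectors. If
 * pci_enable_msix_range() grants only 6, diff = 4 and the driver trims
 * num_ethernet_queues from 8 down to 4 and continues with 6 vectors. If the
 * range request fails with -ENOSPC it retries with exactly one vector and,
 * when that succeeds, runs a single queue under USING_SINGLE_MSIX_FLAG.
 * Only -ENOMEM marks DISABLE_MSI_FLAG so the caller can fall back to INTx.
 */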
1754static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1755{
ca92429f 1756 int i, rc, offset = 0;
9f6c9258 1757
ad5afc89
AE
1758 /* no default status block for vf */
1759 if (IS_PF(bp)) {
1760 rc = request_irq(bp->msix_table[offset++].vector,
1761 bnx2x_msix_sp_int, 0,
1762 bp->dev->name, bp->dev);
1763 if (rc) {
1764 BNX2X_ERR("request sp irq failed\n");
1765 return -EBUSY;
1766 }
9f6c9258
DK
1767 }
1768
55c11941
MS
1769 if (CNIC_SUPPORT(bp))
1770 offset++;
1771
ec6ba945 1772 for_each_eth_queue(bp, i) {
9f6c9258
DK
1773 struct bnx2x_fastpath *fp = &bp->fp[i];
1774 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1775 bp->dev->name, i);
1776
d6214d7a 1777 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1778 bnx2x_msix_fp_int, 0, fp->name, fp);
1779 if (rc) {
ca92429f
DK
1780 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1781 bp->msix_table[offset].vector, rc);
1782 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1783 return -EBUSY;
1784 }
1785
d6214d7a 1786 offset++;
9f6c9258
DK
1787 }
1788
ec6ba945 1789 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1790 if (IS_PF(bp)) {
1791 offset = 1 + CNIC_SUPPORT(bp);
1792 netdev_info(bp->dev,
1793 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1794 bp->msix_table[0].vector,
1795 0, bp->msix_table[offset].vector,
1796 i - 1, bp->msix_table[offset + i - 1].vector);
1797 } else {
1798 offset = CNIC_SUPPORT(bp);
1799 netdev_info(bp->dev,
1800 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1801 0, bp->msix_table[offset].vector,
1802 i - 1, bp->msix_table[offset + i - 1].vector);
1803 }
9f6c9258
DK
1804 return 0;
1805}
1806
d6214d7a 1807int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1808{
1809 int rc;
1810
1811 rc = pci_enable_msi(bp->pdev);
1812 if (rc) {
51c1a580 1813 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1814 return -1;
1815 }
1816 bp->flags |= USING_MSI_FLAG;
1817
1818 return 0;
1819}
1820
1821static int bnx2x_req_irq(struct bnx2x *bp)
1822{
1823 unsigned long flags;
30a5de77 1824 unsigned int irq;
9f6c9258 1825
30a5de77 1826 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1827 flags = 0;
1828 else
1829 flags = IRQF_SHARED;
1830
30a5de77
DK
1831 if (bp->flags & USING_MSIX_FLAG)
1832 irq = bp->msix_table[0].vector;
1833 else
1834 irq = bp->pdev->irq;
1835
1836 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1837}
1838
c957d09f 1839static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1840{
1841 int rc = 0;
30a5de77
DK
1842 if (bp->flags & USING_MSIX_FLAG &&
1843 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1844 rc = bnx2x_req_msix_irqs(bp);
1845 if (rc)
1846 return rc;
1847 } else {
619c5cb6
VZ
1848 rc = bnx2x_req_irq(bp);
1849 if (rc) {
1850 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1851 return rc;
1852 }
1853 if (bp->flags & USING_MSI_FLAG) {
1854 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1855 netdev_info(bp->dev, "using MSI IRQ %d\n",
1856 bp->dev->irq);
1857 }
1858 if (bp->flags & USING_MSIX_FLAG) {
1859 bp->dev->irq = bp->msix_table[0].vector;
1860 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1861 bp->dev->irq);
619c5cb6
VZ
1862 }
1863 }
1864
1865 return 0;
1866}
1867
55c11941
MS
1868static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1869{
1870 int i;
1871
8f20aa57 1872 for_each_rx_queue_cnic(bp, i) {
074975d0 1873 bnx2x_fp_busy_poll_init(&bp->fp[i]);
55c11941 1874 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1875 }
55c11941
MS
1876}
1877
1191cb83 1878static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1879{
1880 int i;
1881
8f20aa57 1882 for_each_eth_queue(bp, i) {
074975d0 1883 bnx2x_fp_busy_poll_init(&bp->fp[i]);
9f6c9258 1884 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1885 }
9f6c9258
DK
1886}
1887
55c11941
MS
1888static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1889{
1890 int i;
1891
8f20aa57 1892 for_each_rx_queue_cnic(bp, i) {
55c11941 1893 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1894 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1895 usleep_range(1000, 2000);
8f20aa57 1896 }
55c11941
MS
1897}
1898
1191cb83 1899static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1900{
1901 int i;
1902
8f20aa57 1903 for_each_eth_queue(bp, i) {
9f6c9258 1904 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1905 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1906 usleep_range(1000, 2000);
8f20aa57 1907 }
9f6c9258
DK
1908}
1909
1910void bnx2x_netif_start(struct bnx2x *bp)
1911{
4b7ed897
DK
1912 if (netif_running(bp->dev)) {
1913 bnx2x_napi_enable(bp);
55c11941
MS
1914 if (CNIC_LOADED(bp))
1915 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1916 bnx2x_int_enable(bp);
1917 if (bp->state == BNX2X_STATE_OPEN)
1918 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1919 }
1920}
1921
1922void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1923{
1924 bnx2x_int_disable_sync(bp, disable_hw);
1925 bnx2x_napi_disable(bp);
55c11941
MS
1926 if (CNIC_LOADED(bp))
1927 bnx2x_napi_disable_cnic(bp);
9f6c9258 1928}
9f6c9258 1929
f663dd9a 1930u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1931 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1932{
8307fa3e 1933 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1934
55c11941 1935 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1936 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1937 u16 ether_type = ntohs(hdr->h_proto);
1938
1939 /* Skip VLAN tag if present */
1940 if (ether_type == ETH_P_8021Q) {
1941 struct vlan_ethhdr *vhdr =
1942 (struct vlan_ethhdr *)skb->data;
1943
1944 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1945 }
1946
1947 /* If ethertype is FCoE or FIP - use FCoE ring */
1948 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1949 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1950 }
55c11941 1951
cdb9d6ae 1952 /* select a non-FCoE queue */
99932d4f 1953 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1954}
1955
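/* Illustrative example (not part of the driver): with FCoE loaded, a
 * VLAN-tagged FIP frame is steered as follows -- the outer ethertype is
 * ETH_P_8021Q, so the VLAN header is skipped to read the encapsulated
 * ethertype; since that is ETH_P_FIP the skb goes to the dedicated FCoE Tx
 * ring via bnx2x_fcoe_tx(). All other traffic is hashed by the stack's
 * fallback and folded into the ETH queue range by the modulo above.
 */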
d6214d7a
DK
1956void bnx2x_set_num_queues(struct bnx2x *bp)
1957{
96305234 1958 /* RSS queues */
55c11941 1959 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1960
a3348722 1961 /* override in STORAGE SD modes */
2e98ffc2 1962 if (IS_MF_STORAGE_ONLY(bp))
55c11941
MS
1963 bp->num_ethernet_queues = 1;
1964
ec6ba945 1965 /* Add special queues */
55c11941
MS
1966 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1967 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1968
1969 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1970}
1971
cdb9d6ae
VZ
1972/**
1973 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1974 *
1975 * @bp: Driver handle
1976 *
 1977 * We currently support at most 16 Tx queues for each CoS, thus we will
1978 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1979 * bp->max_cos.
1980 *
1981 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1982 * index after all ETH L2 indices.
1983 *
1984 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1985 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1986 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1987 *
1988 * The proper configuration of skb->queue_mapping is handled by
1989 * bnx2x_select_queue() and __skb_tx_hash().
1990 *
1991 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1992 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1993 */
55c11941 1994static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1995{
6383c0b3 1996 int rc, tx, rx;
ec6ba945 1997
65565884 1998 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1999 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 2000
6383c0b3 2001/* account for fcoe queue */
55c11941
MS
2002 if (include_cnic && !NO_FCOE(bp)) {
2003 rx++;
2004 tx++;
6383c0b3 2005 }
6383c0b3
AE
2006
2007 rc = netif_set_real_num_tx_queues(bp->dev, tx);
2008 if (rc) {
2009 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2010 return rc;
2011 }
2012 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2013 if (rc) {
2014 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2015 return rc;
2016 }
2017
51c1a580 2018 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
2019 tx, rx);
2020
ec6ba945
VZ
2021 return rc;
2022}
2023
1191cb83 2024static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
2025{
2026 int i;
2027
2028 for_each_queue(bp, i) {
2029 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 2030 u32 mtu;
a8c94b91
VZ
2031
2032 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2033 if (IS_FCOE_IDX(i))
2034 /*
 2035 * Although there are no IP frames expected to arrive on
2036 * this ring we still want to add an
2037 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2038 * overrun attack.
2039 */
e52fcb24 2040 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 2041 else
e52fcb24
ED
2042 mtu = bp->dev->mtu;
2043 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2044 IP_HEADER_ALIGNMENT_PADDING +
2045 ETH_OVREHEAD +
2046 mtu +
2047 BNX2X_FW_RX_ALIGN_END;
16a5fd92 2048 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
2049 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2050 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2051 else
2052 fp->rx_frag_size = 0;
a8c94b91
VZ
2053 }
2054}
2055
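/* Worked example (illustrative; the constants' values are not spelled out
 * here, and a 4K PAGE_SIZE is assumed): for a regular L2 ring with
 * mtu = 1500,
 *
 *   rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING +
 *                 ETH_OVREHEAD + 1500 + BNX2X_FW_RX_ALIGN_END
 *
 * which together with NET_SKB_PAD is assumed to fit in one page, so
 * rx_frag_size is set and Rx buffers can come from page fragments. For a
 * jumbo MTU the sum exceeds PAGE_SIZE, rx_frag_size stays 0 and the driver
 * falls back to its non-fragment buffer allocation path.
 */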
60cad4e6 2056static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
2057{
2058 int i;
619c5cb6
VZ
2059 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2060
16a5fd92 2061 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
2062 * enabled
2063 */
5d317c6a
MS
2064 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2065 bp->rss_conf_obj.ind_table[i] =
96305234
DK
2066 bp->fp->cl_id +
2067 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
2068
2069 /*
2070 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 2071 * per-port, so if explicit configuration is needed, do it only
2072 * for a PMF.
2073 *
2074 * For 57712 and newer on the other hand it's a per-function
2075 * configuration.
2076 */
5d317c6a 2077 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2078}
2079
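/* Worked example (illustrative): ethtool_rxfh_indir_default(i, n) is simply
 * i % n, so with 4 ETH queues and a base cl_id of C the indirection table
 * above is filled as C+0, C+1, C+2, C+3, C+0, C+1, ... across its whole
 * length, i.e. an even round-robin spread of RSS hash buckets over the
 * client IDs of the ETH queues.
 */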
60cad4e6
AE
2080int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2081 bool config_hash, bool enable)
619c5cb6 2082{
3b603066 2083 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2084
2085 /* Although RSS is meaningless when there is a single HW queue we
2086 * still need it enabled in order to have HW Rx hash generated.
2087 *
2088 * if (!is_eth_multi(bp))
2089 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2090 */
2091
96305234 2092 params.rss_obj = rss_obj;
619c5cb6
VZ
2093
2094 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2095
60cad4e6
AE
2096 if (enable) {
2097 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2098
2099 /* RSS configuration */
2100 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2101 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2102 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2103 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2104 if (rss_obj->udp_rss_v4)
2105 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2106 if (rss_obj->udp_rss_v6)
2107 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
e42780b6
DK
2108
2109 if (!CHIP_IS_E1x(bp))
2110 /* valid only for TUNN_MODE_GRE tunnel mode */
2111 __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
60cad4e6
AE
2112 } else {
2113 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2114 }
619c5cb6 2115
96305234
DK
2116 /* Hash bits */
2117 params.rss_result_mask = MULTI_MASK;
619c5cb6 2118
5d317c6a 2119 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2120
96305234
DK
2121 if (config_hash) {
2122 /* RSS keys */
e3ec69ca 2123 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2124 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2125 }
2126
60cad4e6
AE
2127 if (IS_PF(bp))
2128 return bnx2x_config_rss(bp, &params);
2129 else
2130 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2131}
2132
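/* Usage sketch (assumption: bnx2x_config_rss_eth(), used by bnx2x_init_rss()
 * above, is a thin wrapper passing bp->rss_conf_obj and enable=true into
 * bnx2x_rss()): a PF enabling regular RSS ends up with
 * BNX2X_RSS_MODE_REGULAR plus the IPv4/IPv6 and TCP hash flags, UDP hashing
 * only if udp_rss_v4/v6 were requested, and a freshly generated key when
 * config_hash is true; the request is then submitted via bnx2x_config_rss().
 * A VF issues the same request over the VF-PF channel through
 * bnx2x_vfpf_config_rss() instead.
 */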
1191cb83 2133static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2134{
3b603066 2135 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2136
2137 /* Prepare parameters for function state transitions */
2138 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2139
2140 func_params.f_obj = &bp->func_obj;
2141 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2142
2143 func_params.params.hw_init.load_phase = load_code;
2144
2145 return bnx2x_func_state_change(bp, &func_params);
2146}
2147
2148/*
 2149 * Cleans the objects that have internal lists without sending
16a5fd92 2150 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2151 */
7fa6f340 2152void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2153{
2154 int rc;
2155 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2156 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2157 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2158
2159 /***************** Cleanup MACs' object first *************************/
2160
 2161 /* Wait for completion of the requested commands */
2162 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2163 /* Perform a dry cleanup */
2164 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2165
2166 /* Clean ETH primary MAC */
2167 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2168 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2169 &ramrod_flags);
2170 if (rc != 0)
2171 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2172
2173 /* Cleanup UC list */
2174 vlan_mac_flags = 0;
2175 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2176 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2177 &ramrod_flags);
2178 if (rc != 0)
2179 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2180
2181 /***************** Now clean mcast object *****************************/
2182 rparam.mcast_obj = &bp->mcast_obj;
2183 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2184
8b09be5f
YM
2185 /* Add a DEL command... - Since we're doing a driver cleanup only,
2186 * we take a lock surrounding both the initial send and the CONTs,
2187 * as we don't want a true completion to disrupt us in the middle.
2188 */
2189 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2190 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2191 if (rc < 0)
51c1a580
MS
2192 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2193 rc);
619c5cb6
VZ
2194
2195 /* ...and wait until all pending commands are cleared */
2196 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2197 while (rc != 0) {
2198 if (rc < 0) {
2199 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2200 rc);
8b09be5f 2201 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2202 return;
2203 }
2204
2205 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2206 }
8b09be5f 2207 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2208}
2209
2210#ifndef BNX2X_STOP_ON_ERROR
2211#define LOAD_ERROR_EXIT(bp, label) \
2212 do { \
2213 (bp)->state = BNX2X_STATE_ERROR; \
2214 goto label; \
2215 } while (0)
55c11941
MS
2216
2217#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2218 do { \
2219 bp->cnic_loaded = false; \
2220 goto label; \
2221 } while (0)
2222#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2223#define LOAD_ERROR_EXIT(bp, label) \
2224 do { \
2225 (bp)->state = BNX2X_STATE_ERROR; \
2226 (bp)->panic = 1; \
2227 return -EBUSY; \
2228 } while (0)
55c11941
MS
2229#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2230 do { \
2231 bp->cnic_loaded = false; \
2232 (bp)->panic = 1; \
2233 return -EBUSY; \
2234 } while (0)
2235#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2236
ad5afc89
AE
2237static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2238{
2239 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2240 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2241 return;
2242}
2243
2244static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2245{
8db573ba 2246 int num_groups, vf_headroom = 0;
ad5afc89 2247 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2248
ad5afc89
AE
2249 /* number of queues for statistics is number of eth queues + FCoE */
2250 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2251
ad5afc89
AE
2252 /* Total number of FW statistics requests =
2253 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2254 * and fcoe l2 queue) stats + num of queues (which includes another 1
2255 * for fcoe l2 queue if applicable)
2256 */
2257 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2258
8db573ba
AE
2259 /* vf stats appear in the request list, but their data is allocated by
2260 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2261 * it is used to determine where to place the vf stats queries in the
2262 * request struct
2263 */
2264 if (IS_SRIOV(bp))
6411280a 2265 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2266
ad5afc89
AE
2267 /* Request is built from stats_query_header and an array of
2268 * stats_query_cmd_group each of which contains
 2269 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2270 * configured in the stats_query_header.
2271 */
2272 num_groups =
8db573ba
AE
2273 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2274 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2275 1 : 0));
2276
8db573ba
AE
2277 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2278 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2279 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2280 num_groups * sizeof(struct stats_query_cmd_group);
2281
2282 /* Data for statistics requests + stats_counter
2283 * stats_counter holds per-STORM counters that are incremented
2284 * when STORM has finished with the current request.
 2285 * memory for FCoE offloaded statistics is counted anyway,
2286 * even if they will not be sent.
2287 * VF stats are not accounted for here as the data of VF stats is stored
2288 * in memory allocated by the VF, not here.
2289 */
2290 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2291 sizeof(struct per_pf_stats) +
2292 sizeof(struct fcoe_statistics_params) +
2293 sizeof(struct per_queue_stats) * num_queue_stats +
2294 sizeof(struct stats_counter);
2295
cd2b0389
JP
2296 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2297 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2298 if (!bp->fw_stats)
2299 goto alloc_mem_err;
ad5afc89
AE
2300
2301 /* Set shortcuts */
2302 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2303 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2304 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2305 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2306 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2307 bp->fw_stats_req_sz;
2308
6bf07b8e 2309 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2310 U64_HI(bp->fw_stats_req_mapping),
2311 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2312 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2313 U64_HI(bp->fw_stats_data_mapping),
2314 U64_LO(bp->fw_stats_data_mapping));
2315 return 0;
2316
2317alloc_mem_err:
2318 bnx2x_free_fw_stats_mem(bp);
2319 BNX2X_ERR("Can't allocate FW stats memory\n");
2320 return -ENOMEM;
2321}
2322
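/* Worked example (illustrative; the value of STATS_QUERY_CMD_COUNT is
 * assumed, not quoted): with 8 ETH queues and FCoE enabled,
 * num_queue_stats = 9 and fw_stats_num = 2 + 1 + 9 = 12. With no VF headroom
 * the group count computed above is the integer ceiling
 *
 *   num_groups = DIV_ROUND_UP(fw_stats_num + vf_headroom,
 *                             STATS_QUERY_CMD_COUNT)
 *
 * i.e. one group when STATS_QUERY_CMD_COUNT >= 12, two groups for counts
 * between 6 and 11, and so on; the request buffer is then sized as one
 * stats_query_header plus num_groups command groups.
 */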
2323/* send load request to mcp and analyze response */
2324static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2325{
178135c1
DK
2326 u32 param;
2327
ad5afc89
AE
2328 /* init fw_seq */
2329 bp->fw_seq =
2330 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2331 DRV_MSG_SEQ_NUMBER_MASK);
2332 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2333
2334 /* Get current FW pulse sequence */
2335 bp->fw_drv_pulse_wr_seq =
2336 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2337 DRV_PULSE_SEQ_MASK);
2338 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2339
178135c1
DK
2340 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2341
2342 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2343 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2344
ad5afc89 2345 /* load request */
178135c1 2346 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2347
2348 /* if mcp fails to respond we must abort */
2349 if (!(*load_code)) {
2350 BNX2X_ERR("MCP response failure, aborting\n");
2351 return -EBUSY;
2352 }
2353
2354 /* If mcp refused (e.g. other port is in diagnostic mode) we
2355 * must abort
2356 */
2357 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2358 BNX2X_ERR("MCP refused load request, aborting\n");
2359 return -EBUSY;
2360 }
2361 return 0;
2362}
2363
2364/* check whether another PF has already loaded FW to chip. In
2365 * virtualized environments a pf from another VM may have already
2366 * initialized the device including loading FW
2367 */
91ebb929 2368int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2369{
2370 /* is another pf loaded on this engine? */
2371 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2372 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2373 /* build my FW version dword */
2374 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2375 (BCM_5710_FW_MINOR_VERSION << 8) +
2376 (BCM_5710_FW_REVISION_VERSION << 16) +
2377 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2378
2379 /* read loaded FW from chip */
2380 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2381
2382 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2383 loaded_fw, my_fw);
2384
2385 /* abort nic load if version mismatch */
2386 if (my_fw != loaded_fw) {
91ebb929
YM
2387 if (print_err)
2388 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2389 loaded_fw, my_fw);
2390 else
2391 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2392 loaded_fw, my_fw);
ad5afc89
AE
2393 return -EBUSY;
2394 }
2395 }
2396 return 0;
2397}
2398
2399/* returns the "mcp load_code" according to global load_count array */
2400static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2401{
2402 int path = BP_PATH(bp);
2403
2404 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2405 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2406 bnx2x_load_count[path][2]);
2407 bnx2x_load_count[path][0]++;
2408 bnx2x_load_count[path][1 + port]++;
ad5afc89 2409 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2410 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2411 bnx2x_load_count[path][2]);
2412 if (bnx2x_load_count[path][0] == 1)
ad5afc89 2413 return FW_MSG_CODE_DRV_LOAD_COMMON;
a8f47eb7 2414 else if (bnx2x_load_count[path][1 + port] == 1)
ad5afc89
AE
2415 return FW_MSG_CODE_DRV_LOAD_PORT;
2416 else
2417 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2418}
2419
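/* Illustrative example (not part of the driver): with the MCP absent, the
 * first function to load on a path bumps bnx2x_load_count[path][0] to 1 and
 * gets FW_MSG_CODE_DRV_LOAD_COMMON (full common-block init); the first
 * function on a given port gets ..._LOAD_PORT (port init only); any later
 * function on an already initialized port gets ..._LOAD_FUNCTION and skips
 * both, mirroring what the MCP would have answered.
 */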
2420/* mark PMF if applicable */
2421static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2422{
2423 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2424 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2425 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2426 bp->port.pmf = 1;
2427 /* We need the barrier to ensure the ordering between the
2428 * writing to bp->port.pmf here and reading it from the
2429 * bnx2x_periodic_task().
2430 */
2431 smp_mb();
2432 } else {
2433 bp->port.pmf = 0;
452427b0
YM
2434 }
2435
ad5afc89
AE
2436 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2437}
2438
2439static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2440{
2441 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2442 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2443 (bp->common.shmem2_base)) {
2444 if (SHMEM2_HAS(bp, dcc_support))
2445 SHMEM2_WR(bp, dcc_support,
2446 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2447 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2448 if (SHMEM2_HAS(bp, afex_driver_support))
2449 SHMEM2_WR(bp, afex_driver_support,
2450 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2451 }
2452
2453 /* Set AFEX default VLAN tag to an invalid value */
2454 bp->afex_def_vlan_tag = -1;
452427b0
YM
2455}
2456
1191cb83
ED
2457/**
2458 * bnx2x_bz_fp - zero content of the fastpath structure.
2459 *
2460 * @bp: driver handle
2461 * @index: fastpath index to be zeroed
2462 *
 2463 * Makes sure the contents of the bp->fp[index].napi are kept
2464 * intact.
2465 */
2466static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2467{
2468 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2469 int cos;
1191cb83 2470 struct napi_struct orig_napi = fp->napi;
15192a8c 2471 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2472
1191cb83 2473 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2474 if (fp->tpa_info)
2475 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2476 sizeof(struct bnx2x_agg_info));
2477 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2478
2479 /* Restore the NAPI object as it has been already initialized */
2480 fp->napi = orig_napi;
15192a8c 2481 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2482 fp->bp = bp;
2483 fp->index = index;
2484 if (IS_ETH_FP(fp))
2485 fp->max_cos = bp->max_cos;
2486 else
2487 /* Special queues support only one CoS */
2488 fp->max_cos = 1;
2489
65565884 2490 /* Init txdata pointers */
65565884
MS
2491 if (IS_FCOE_FP(fp))
2492 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2493 if (IS_ETH_FP(fp))
2494 for_each_cos_in_tx_queue(fp, cos)
2495 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2496 BNX2X_NUM_ETH_QUEUES(bp) + index];
2497
16a5fd92 2498 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2499 * minimal size so it must be set prior to queue memory allocation
2500 */
f8dcb5e3 2501 if (bp->dev->features & NETIF_F_LRO)
1191cb83 2502 fp->mode = TPA_MODE_LRO;
f8dcb5e3 2503 else if (bp->dev->features & NETIF_F_GRO &&
7e6b4d44 2504 bnx2x_mtu_allows_gro(bp->dev->mtu))
1191cb83 2505 fp->mode = TPA_MODE_GRO;
7e6b4d44
MS
2506 else
2507 fp->mode = TPA_MODE_DISABLED;
1191cb83 2508
22a8f237
MS
2509 /* We don't want TPA if it's disabled in bp
2510 * or if this is an FCoE L2 ring.
2511 */
2512 if (bp->disable_tpa || IS_FCOE_FP(fp))
7e6b4d44 2513 fp->mode = TPA_MODE_DISABLED;
55c11941
MS
2514}
2515
2516int bnx2x_load_cnic(struct bnx2x *bp)
2517{
2518 int i, rc, port = BP_PORT(bp);
2519
2520 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2521
2522 mutex_init(&bp->cnic_mutex);
2523
ad5afc89
AE
2524 if (IS_PF(bp)) {
2525 rc = bnx2x_alloc_mem_cnic(bp);
2526 if (rc) {
2527 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2528 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2529 }
55c11941
MS
2530 }
2531
2532 rc = bnx2x_alloc_fp_mem_cnic(bp);
2533 if (rc) {
2534 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2535 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2536 }
2537
2538 /* Update the number of queues with the cnic queues */
2539 rc = bnx2x_set_real_num_queues(bp, 1);
2540 if (rc) {
2541 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2542 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2543 }
2544
2545 /* Add all CNIC NAPI objects */
2546 bnx2x_add_all_napi_cnic(bp);
2547 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2548 bnx2x_napi_enable_cnic(bp);
2549
2550 rc = bnx2x_init_hw_func_cnic(bp);
2551 if (rc)
2552 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2553
2554 bnx2x_nic_init_cnic(bp);
2555
ad5afc89
AE
2556 if (IS_PF(bp)) {
2557 /* Enable Timer scan */
2558 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2559
2560 /* setup cnic queues */
2561 for_each_cnic_queue(bp, i) {
2562 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2563 if (rc) {
2564 BNX2X_ERR("Queue setup failed\n");
2565 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2566 }
55c11941
MS
2567 }
2568 }
2569
2570 /* Initialize Rx filter. */
8b09be5f 2571 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2572
2573 /* re-read iscsi info */
2574 bnx2x_get_iscsi_info(bp);
2575 bnx2x_setup_cnic_irq_info(bp);
2576 bnx2x_setup_cnic_info(bp);
2577 bp->cnic_loaded = true;
2578 if (bp->state == BNX2X_STATE_OPEN)
2579 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2580
55c11941
MS
2581 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2582
2583 return 0;
2584
2585#ifndef BNX2X_STOP_ON_ERROR
2586load_error_cnic2:
2587 /* Disable Timer scan */
2588 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2589
2590load_error_cnic1:
2591 bnx2x_napi_disable_cnic(bp);
2592 /* Update the number of queues without the cnic queues */
d9d81862 2593 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2594 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2595load_error_cnic0:
2596 BNX2X_ERR("CNIC-related load failed\n");
2597 bnx2x_free_fp_mem_cnic(bp);
2598 bnx2x_free_mem_cnic(bp);
2599 return rc;
2600#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2601}
2602
9f6c9258
DK
2603/* must be called with rtnl_lock */
2604int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2605{
619c5cb6 2606 int port = BP_PORT(bp);
ad5afc89 2607 int i, rc = 0, load_code = 0;
9f6c9258 2608
55c11941
MS
2609 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2610 DP(NETIF_MSG_IFUP,
2611 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2612
9f6c9258 2613#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2614 if (unlikely(bp->panic)) {
2615 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2616 return -EPERM;
51c1a580 2617 }
9f6c9258
DK
2618#endif
2619
2620 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2621
16a5fd92 2622 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2623 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2624 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2625 &bp->last_reported_link.link_report_flags);
2ae17f66 2626
ad5afc89
AE
2627 if (IS_PF(bp))
2628 /* must be called before memory allocation and HW init */
2629 bnx2x_ilt_set_info(bp);
523224a3 2630
6383c0b3
AE
2631 /*
2632 * Zero fastpath structures preserving invariants like napi, which are
2633 * allocated only once, fp index, max_cos, bp pointer.
7e6b4d44 2634 * Also set fp->mode and txdata_ptr.
b3b83c3f 2635 */
51c1a580 2636 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2637 for_each_queue(bp, i)
2638 bnx2x_bz_fp(bp, i);
55c11941
MS
2639 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2640 bp->num_cnic_queues) *
2641 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2642
55c11941 2643 bp->fcoe_init = false;
6383c0b3 2644
a8c94b91
VZ
2645 /* Set the receive queues buffer size */
2646 bnx2x_set_rx_buf_size(bp);
2647
ad5afc89
AE
2648 if (IS_PF(bp)) {
2649 rc = bnx2x_alloc_mem(bp);
2650 if (rc) {
2651 BNX2X_ERR("Unable to allocate bp memory\n");
2652 return rc;
2653 }
2654 }
2655
ad5afc89
AE
 2656 /* needs to be done after alloc mem, since it's self-adjusting to the
 2657 * amount of memory available for RSS queues
2658 */
2659 rc = bnx2x_alloc_fp_mem(bp);
2660 if (rc) {
2661 BNX2X_ERR("Unable to allocate memory for fps\n");
2662 LOAD_ERROR_EXIT(bp, load_error0);
2663 }
d6214d7a 2664
e3ed4eae
DK
 2665 /* Allocate memory for FW statistics */
2666 if (bnx2x_alloc_fw_stats_mem(bp))
2667 LOAD_ERROR_EXIT(bp, load_error0);
2668
8d9ac297
AE
2669 /* request pf to initialize status blocks */
2670 if (IS_VF(bp)) {
2671 rc = bnx2x_vfpf_init(bp);
2672 if (rc)
2673 LOAD_ERROR_EXIT(bp, load_error0);
2674 }
2675
b3b83c3f
DK
2676 /* As long as bnx2x_alloc_mem() may possibly update
2677 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2678 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2679 */
55c11941 2680 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2681 if (rc) {
ec6ba945 2682 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2683 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2684 }
2685
6383c0b3 2686 /* configure multi cos mappings in kernel.
16a5fd92
YM
2687 * this configuration may be overridden by a multi class queue
2688 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2689 */
2690 bnx2x_setup_tc(bp->dev, bp->max_cos);
2691
26614ba5
MS
2692 /* Add all NAPI objects */
2693 bnx2x_add_all_napi(bp);
55c11941 2694 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2695 bnx2x_napi_enable(bp);
2696
ad5afc89
AE
2697 if (IS_PF(bp)) {
2698 /* set pf load just before approaching the MCP */
2699 bnx2x_set_pf_load(bp);
2700
2701 /* if mcp exists send load request and analyze response */
2702 if (!BP_NOMCP(bp)) {
2703 /* attempt to load pf */
2704 rc = bnx2x_nic_load_request(bp, &load_code);
2705 if (rc)
2706 LOAD_ERROR_EXIT(bp, load_error1);
2707
2708 /* what did mcp say? */
91ebb929 2709 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2710 if (rc) {
2711 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2712 LOAD_ERROR_EXIT(bp, load_error2);
2713 }
ad5afc89
AE
2714 } else {
2715 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2716 }
9f6c9258 2717
ad5afc89
AE
2718 /* mark pmf if applicable */
2719 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2720
ad5afc89
AE
2721 /* Init Function state controlling object */
2722 bnx2x__init_func_obj(bp);
6383c0b3 2723
ad5afc89
AE
2724 /* Initialize HW */
2725 rc = bnx2x_init_hw(bp, load_code);
2726 if (rc) {
2727 BNX2X_ERR("HW init failed, aborting\n");
2728 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2729 LOAD_ERROR_EXIT(bp, load_error2);
2730 }
9f6c9258
DK
2731 }
2732
ecf01c22
YM
2733 bnx2x_pre_irq_nic_init(bp);
2734
d6214d7a
DK
2735 /* Connect to IRQs */
2736 rc = bnx2x_setup_irqs(bp);
523224a3 2737 if (rc) {
ad5afc89
AE
2738 BNX2X_ERR("setup irqs failed\n");
2739 if (IS_PF(bp))
2740 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2741 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2742 }
2743
619c5cb6 2744 /* Init per-function objects */
ad5afc89 2745 if (IS_PF(bp)) {
ecf01c22
YM
2746 /* Setup NIC internals and enable interrupts */
2747 bnx2x_post_irq_nic_init(bp, load_code);
2748
ad5afc89 2749 bnx2x_init_bp_objs(bp);
b56e9670 2750 bnx2x_iov_nic_init(bp);
a3348722 2751
ad5afc89
AE
2752 /* Set AFEX default VLAN tag to an invalid value */
2753 bp->afex_def_vlan_tag = -1;
2754 bnx2x_nic_load_afex_dcc(bp, load_code);
2755 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2756 rc = bnx2x_func_start(bp);
2757 if (rc) {
2758 BNX2X_ERR("Function start failed!\n");
2759 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2760
619c5cb6 2761 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2762 }
9f6c9258 2763
ad5afc89
AE
2764 /* Send LOAD_DONE command to MCP */
2765 if (!BP_NOMCP(bp)) {
2766 load_code = bnx2x_fw_command(bp,
2767 DRV_MSG_CODE_LOAD_DONE, 0);
2768 if (!load_code) {
2769 BNX2X_ERR("MCP response failure, aborting\n");
2770 rc = -EBUSY;
2771 LOAD_ERROR_EXIT(bp, load_error3);
2772 }
2773 }
9f6c9258 2774
0c14e5ce
AE
2775 /* initialize FW coalescing state machines in RAM */
2776 bnx2x_update_coalesce(bp);
60cad4e6 2777 }
0c14e5ce 2778
60cad4e6
AE
2779 /* setup the leading queue */
2780 rc = bnx2x_setup_leading(bp);
2781 if (rc) {
2782 BNX2X_ERR("Setup leading failed!\n");
2783 LOAD_ERROR_EXIT(bp, load_error3);
2784 }
ad5afc89 2785
60cad4e6
AE
2786 /* set up the rest of the queues */
2787 for_each_nondefault_eth_queue(bp, i) {
2788 if (IS_PF(bp))
2789 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2790 else /* VF */
2791 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2792 if (rc) {
60cad4e6 2793 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2794 LOAD_ERROR_EXIT(bp, load_error3);
2795 }
60cad4e6 2796 }
8d9ac297 2797
60cad4e6
AE
2798 /* setup rss */
2799 rc = bnx2x_init_rss(bp);
2800 if (rc) {
2801 BNX2X_ERR("PF RSS init failed\n");
2802 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2803 }
619c5cb6 2804
523224a3
DK
2805 /* Now when Clients are configured we are ready to work */
2806 bp->state = BNX2X_STATE_OPEN;
2807
619c5cb6 2808 /* Configure a ucast MAC */
ad5afc89
AE
2809 if (IS_PF(bp))
2810 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2811 else /* vf */
f8f4f61a
DK
2812 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2813 true);
51c1a580
MS
2814 if (rc) {
2815 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2816 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2817 }
6e30dd4e 2818
ad5afc89 2819 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2820 bnx2x_update_max_mf_config(bp, bp->pending_max);
2821 bp->pending_max = 0;
2822 }
2823
ad5afc89
AE
2824 if (bp->port.pmf) {
2825 rc = bnx2x_initial_phy_init(bp, load_mode);
2826 if (rc)
2827 LOAD_ERROR_EXIT(bp, load_error3);
2828 }
c63da990 2829 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2830
619c5cb6
VZ
2831 /* Start fast path */
2832
2833 /* Initialize Rx filter. */
8b09be5f 2834 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2835
eeed018c
MK
2836 if (bp->flags & PTP_SUPPORTED) {
2837 bnx2x_init_ptp(bp);
2838 bnx2x_configure_ptp_filters(bp);
2839 }
2840 /* Start Tx */
9f6c9258
DK
2841 switch (load_mode) {
2842 case LOAD_NORMAL:
16a5fd92 2843 /* Tx queue should be only re-enabled */
523224a3 2844 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2845 break;
2846
2847 case LOAD_OPEN:
2848 netif_tx_start_all_queues(bp->dev);
4e857c58 2849 smp_mb__after_atomic();
9f6c9258
DK
2850 break;
2851
2852 case LOAD_DIAG:
8970b2e4 2853 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2854 bp->state = BNX2X_STATE_DIAG;
2855 break;
2856
2857 default:
2858 break;
2859 }
2860
00253a8c 2861 if (bp->port.pmf)
4c704899 2862 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2863 else
9f6c9258
DK
2864 bnx2x__link_status_update(bp);
2865
2866 /* start the timer */
2867 mod_timer(&bp->timer, jiffies + bp->current_interval);
2868
55c11941
MS
2869 if (CNIC_ENABLED(bp))
2870 bnx2x_load_cnic(bp);
9f6c9258 2871
42f8277f
YM
2872 if (IS_PF(bp))
2873 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2874
ad5afc89
AE
2875 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2876 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2877 u32 val;
2878 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2879 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2880 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2881 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2882 }
2883
619c5cb6 2884 /* Wait for all pending SP commands to complete */
ad5afc89 2885 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2886 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2887 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2888 return -EBUSY;
2889 }
6891dd25 2890
9876879f
BW
2891 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2892 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2893 bnx2x_dcbx_init(bp, false);
2894
55c11941
MS
2895 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2896
9f6c9258
DK
2897 return 0;
2898
619c5cb6 2899#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2900load_error3:
ad5afc89
AE
2901 if (IS_PF(bp)) {
2902 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2903
ad5afc89
AE
2904 /* Clean queueable objects */
2905 bnx2x_squeeze_objects(bp);
2906 }
619c5cb6 2907
9f6c9258
DK
2908 /* Free SKBs, SGEs, TPA pool and driver internals */
2909 bnx2x_free_skbs(bp);
ec6ba945 2910 for_each_rx_queue(bp, i)
9f6c9258 2911 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2912
9f6c9258 2913 /* Release IRQs */
d6214d7a
DK
2914 bnx2x_free_irq(bp);
2915load_error2:
ad5afc89 2916 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2917 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2918 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2919 }
2920
2921 bp->port.pmf = 0;
9f6c9258
DK
2922load_error1:
2923 bnx2x_napi_disable(bp);
722c6f58 2924 bnx2x_del_all_napi(bp);
ad5afc89 2925
889b9af3 2926 /* clear pf_load status, as it was already set */
ad5afc89
AE
2927 if (IS_PF(bp))
2928 bnx2x_clear_pf_load(bp);
d6214d7a 2929load_error0:
ad5afc89 2930 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2931 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2932 bnx2x_free_mem(bp);
2933
2934 return rc;
619c5cb6 2935#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2936}
2937
7fa6f340 2938int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2939{
2940 u8 rc = 0, cos, i;
2941
2942 /* Wait until tx fastpath tasks complete */
2943 for_each_tx_queue(bp, i) {
2944 struct bnx2x_fastpath *fp = &bp->fp[i];
2945
2946 for_each_cos_in_tx_queue(fp, cos)
2947 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2948 if (rc)
2949 return rc;
2950 }
2951 return 0;
2952}
2953
9f6c9258 2954/* must be called with rtnl_lock */
5d07d868 2955int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2956{
2957 int i;
c9ee9206
VZ
2958 bool global = false;
2959
55c11941
MS
2960 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2961
9ce392d4 2962 /* mark driver is unloaded in shmem2 */
ad5afc89 2963 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2964 u32 val;
2965 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2966 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2967 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2968 }
2969
80bfe5cc 2970 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2971 (bp->state == BNX2X_STATE_CLOSED ||
2972 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2973 /* We can get here if the driver has been unloaded
2974 * during parity error recovery and is either waiting for a
2975 * leader to complete or for other functions to unload and
2976 * then ifdown has been issued. In this case we want to
2977 * unload and let other functions to complete a recovery
2978 * process.
2979 */
9f6c9258
DK
2980 bp->recovery_state = BNX2X_RECOVERY_DONE;
2981 bp->is_leader = 0;
c9ee9206
VZ
2982 bnx2x_release_leader_lock(bp);
2983 smp_mb();
2984
51c1a580
MS
2985 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2986 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2987 return -EINVAL;
2988 }
2989
80bfe5cc 2990 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2991 * has not completed successfully - all resources are released.
80bfe5cc
YM
2992 *
2993 * we can get here only after unsuccessful ndo_* callback, during which
2994 * dev->IFF_UP flag is still on.
2995 */
2996 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2997 return 0;
2998
 2999 /* It's important to set the bp->state to a value different from
87b7ba3d
VZ
3000 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3001 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3002 */
3003 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3004 smp_mb();
3005
78c3bcc5
AE
3006 /* indicate to VFs that the PF is going down */
3007 bnx2x_iov_channel_down(bp);
3008
55c11941
MS
3009 if (CNIC_LOADED(bp))
3010 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3011
9505ee37
VZ
3012 /* Stop Tx */
3013 bnx2x_tx_disable(bp);
65565884 3014 netdev_reset_tc(bp->dev);
9505ee37 3015
9f6c9258 3016 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 3017
9f6c9258 3018 del_timer_sync(&bp->timer);
f85582f8 3019
ad5afc89
AE
3020 if (IS_PF(bp)) {
3021 /* Set ALWAYS_ALIVE bit in shmem */
3022 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3023 bnx2x_drv_pulse(bp);
3024 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3025 bnx2x_save_statistics(bp);
3026 }
9f6c9258 3027
ad5afc89
AE
3028 /* wait till consumers catch up with producers in all queues */
3029 bnx2x_drain_tx_queues(bp);
9f6c9258 3030
9b176b6b
AE
 3031 /* if VF, indicate to PF that this function is going down (PF will delete
 3032 * sp elements and clear initializations)
3033 */
3034 if (IS_VF(bp))
3035 bnx2x_vfpf_close_vf(bp);
3036 else if (unload_mode != UNLOAD_RECOVERY)
3037 /* if this is a normal/close unload need to clean up chip*/
5d07d868 3038 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 3039 else {
c9ee9206
VZ
3040 /* Send the UNLOAD_REQUEST to the MCP */
3041 bnx2x_send_unload_req(bp, unload_mode);
3042
16a5fd92 3043 /* Prevent transactions to host from the functions on the
c9ee9206 3044 * engine that doesn't reset global blocks in case of global
16a5fd92 3045 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
 3046 * (the engine on which the leader will perform the recovery
 3047 * last).
3048 */
3049 if (!CHIP_IS_E1x(bp))
3050 bnx2x_pf_disable(bp);
3051
3052 /* Disable HW interrupts, NAPI */
523224a3 3053 bnx2x_netif_stop(bp, 1);
26614ba5
MS
3054 /* Delete all NAPI objects */
3055 bnx2x_del_all_napi(bp);
55c11941
MS
3056 if (CNIC_LOADED(bp))
3057 bnx2x_del_all_napi_cnic(bp);
523224a3 3058 /* Release IRQs */
d6214d7a 3059 bnx2x_free_irq(bp);
c9ee9206
VZ
3060
3061 /* Report UNLOAD_DONE to MCP */
5d07d868 3062 bnx2x_send_unload_done(bp, false);
523224a3 3063 }
9f6c9258 3064
619c5cb6 3065 /*
16a5fd92 3066 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
3067 * the queueable objects here in case they failed to get cleaned so far.
3068 */
ad5afc89
AE
3069 if (IS_PF(bp))
3070 bnx2x_squeeze_objects(bp);
619c5cb6 3071
79616895
VZ
3072 /* There should be no more pending SP commands at this stage */
3073 bp->sp_state = 0;
3074
9f6c9258
DK
3075 bp->port.pmf = 0;
3076
a0d307b2
DK
3077 /* clear pending work in rtnl task */
3078 bp->sp_rtnl_state = 0;
3079 smp_mb();
3080
9f6c9258
DK
3081 /* Free SKBs, SGEs, TPA pool and driver internals */
3082 bnx2x_free_skbs(bp);
55c11941
MS
3083 if (CNIC_LOADED(bp))
3084 bnx2x_free_skbs_cnic(bp);
ec6ba945 3085 for_each_rx_queue(bp, i)
9f6c9258 3086 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3087
ad5afc89
AE
3088 bnx2x_free_fp_mem(bp);
3089 if (CNIC_LOADED(bp))
55c11941 3090 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3091
ad5afc89 3092 if (IS_PF(bp)) {
ad5afc89
AE
3093 if (CNIC_LOADED(bp))
3094 bnx2x_free_mem_cnic(bp);
3095 }
b4cddbd6
AE
3096 bnx2x_free_mem(bp);
3097
9f6c9258 3098 bp->state = BNX2X_STATE_CLOSED;
55c11941 3099 bp->cnic_loaded = false;
9f6c9258 3100
42f8277f
YM
3101 /* Clear driver version indication in shmem */
3102 if (IS_PF(bp))
3103 bnx2x_update_mng_version(bp);
3104
c9ee9206
VZ
3105 /* Check if there are pending parity attentions. If there are - set
3106 * RECOVERY_IN_PROGRESS.
3107 */
ad5afc89 3108 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3109 bnx2x_set_reset_in_progress(bp);
3110
3111 /* Set RESET_IS_GLOBAL if needed */
3112 if (global)
3113 bnx2x_set_reset_global(bp);
3114 }
3115
9f6c9258
DK
3116 /* The last driver must disable a "close the gate" if there is no
3117 * parity attention or "process kill" pending.
3118 */
ad5afc89
AE
3119 if (IS_PF(bp) &&
3120 !bnx2x_clear_pf_load(bp) &&
3121 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3122 bnx2x_disable_close_the_gate(bp);
3123
55c11941
MS
3124 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3125
9f6c9258
DK
3126 return 0;
3127}
f85582f8 3128
9f6c9258
DK
3129int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3130{
3131 u16 pmcsr;
3132
adf5f6a1 3133 /* If there is no power capability, silently succeed */
29ed74c3 3134 if (!bp->pdev->pm_cap) {
51c1a580 3135 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3136 return 0;
3137 }
3138
29ed74c3 3139 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3140
3141 switch (state) {
3142 case PCI_D0:
29ed74c3 3143 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3144 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3145 PCI_PM_CTRL_PME_STATUS));
3146
3147 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3148 /* delay required during transition out of D3hot */
3149 msleep(20);
3150 break;
3151
3152 case PCI_D3hot:
3153 /* If there are other clients above don't
3154 shut down the power */
3155 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3156 return 0;
3157 /* Don't shut down the power for emulation and FPGA */
3158 if (CHIP_REV_IS_SLOW(bp))
3159 return 0;
3160
3161 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3162 pmcsr |= 3;
3163
3164 if (bp->wol)
3165 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3166
29ed74c3 3167 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3168 pmcsr);
3169
3170 /* No more memory access after this point until
3171 * device is brought back to D0.
3172 */
3173 break;
3174
3175 default:
51c1a580 3176 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3177 return -EINVAL;
3178 }
3179 return 0;
3180}
3181
9f6c9258
DK
3182/*
3183 * net_device service functions
3184 */
a8f47eb7 3185static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3186{
3187 int work_done = 0;
6383c0b3 3188 u8 cos;
9f6c9258
DK
3189 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3190 napi);
3191 struct bnx2x *bp = fp->bp;
3192
3193 while (1) {
3194#ifdef BNX2X_STOP_ON_ERROR
3195 if (unlikely(bp->panic)) {
3196 napi_complete(napi);
3197 return 0;
3198 }
3199#endif
8f20aa57 3200 if (!bnx2x_fp_lock_napi(fp))
24e579c8 3201 return budget;
9f6c9258 3202
6383c0b3 3203 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3204 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3205 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3206
9f6c9258
DK
3207 if (bnx2x_has_rx_work(fp)) {
3208 work_done += bnx2x_rx_int(fp, budget - work_done);
3209
3210 /* must not complete if we consumed full budget */
8f20aa57
DK
3211 if (work_done >= budget) {
3212 bnx2x_fp_unlock_napi(fp);
9f6c9258 3213 break;
8f20aa57 3214 }
9f6c9258
DK
3215 }
3216
074975d0
ED
3217 bnx2x_fp_unlock_napi(fp);
3218
9f6c9258 3219 /* Fall out from the NAPI loop if needed */
074975d0 3220 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3221
ec6ba945
VZ
3222 /* No need to update SB for FCoE L2 ring as long as
3223 * it's connected to the default SB and the SB
3224 * has been updated when NAPI was scheduled.
3225 */
3226 if (IS_FCOE_FP(fp)) {
3227 napi_complete(napi);
3228 break;
3229 }
9f6c9258 3230 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3231 /* bnx2x_has_rx_work() reads the status block,
3232 * thus we need to ensure that status block indices
3233 * have been actually read (bnx2x_update_fpsb_idx)
3234 * prior to this check (bnx2x_has_rx_work) so that
3235 * we won't write the "newer" value of the status block
3236 * to IGU (if there was a DMA right after
3237 * bnx2x_has_rx_work and if there is no rmb, the memory
3238 * reading (bnx2x_update_fpsb_idx) may be postponed
3239 * to right before bnx2x_ack_sb). In this case there
3240 * will never be another interrupt until there is
3241 * another update of the status block, while there
3242 * is still unhandled work.
3243 */
9f6c9258
DK
3244 rmb();
3245
3246 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3247 napi_complete(napi);
3248 /* Re-enable interrupts */
51c1a580 3249 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3250 "Update index to %d\n", fp->fp_hc_idx);
3251 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3252 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3253 IGU_INT_ENABLE, 1);
3254 break;
3255 }
3256 }
3257 }
3258
3259 return work_done;
3260}
3261
e0d1095a 3262#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3263/* must be called with local_bh_disable()d */
3264int bnx2x_low_latency_recv(struct napi_struct *napi)
3265{
3266 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3267 napi);
3268 struct bnx2x *bp = fp->bp;
3269 int found = 0;
3270
3271 if ((bp->state == BNX2X_STATE_CLOSED) ||
3272 (bp->state == BNX2X_STATE_ERROR) ||
f8dcb5e3 3273 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
8f20aa57
DK
3274 return LL_FLUSH_FAILED;
3275
3276 if (!bnx2x_fp_lock_poll(fp))
3277 return LL_FLUSH_BUSY;
3278
75b29459 3279 if (bnx2x_has_rx_work(fp))
8f20aa57 3280 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3281
3282 bnx2x_fp_unlock_poll(fp);
3283
3284 return found;
3285}
3286#endif
3287
9f6c9258
DK
3288/* we split the first BD into headers and data BDs
3289 * to ease the pain of our fellow microcode engineers
3290 * we use one mapping for both BDs
9f6c9258 3291 */
91226790
DK
3292static u16 bnx2x_tx_split(struct bnx2x *bp,
3293 struct bnx2x_fp_txdata *txdata,
3294 struct sw_tx_bd *tx_buf,
3295 struct eth_tx_start_bd **tx_bd, u16 hlen,
3296 u16 bd_prod)
9f6c9258
DK
3297{
3298 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3299 struct eth_tx_bd *d_tx_bd;
3300 dma_addr_t mapping;
3301 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3302
3303 /* first fix first BD */
9f6c9258
DK
3304 h_tx_bd->nbytes = cpu_to_le16(hlen);
3305
91226790
DK
3306 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3307 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3308
3309 /* now get a new data BD
3310 * (after the pbd) and fill it */
3311 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3312 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3313
3314 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3315 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3316
3317 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3318 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3319 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3320
3321 /* this marks the BD as one that has no individual mapping */
3322 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3323
3324 DP(NETIF_MSG_TX_QUEUED,
3325 "TSO split data size is %d (%x:%x)\n",
3326 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3327
3328 /* update tx_bd */
3329 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3330
3331 return bd_prod;
3332}
3333
86564c3f
YM
3334#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3335#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3336static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3337{
86564c3f
YM
3338 __sum16 tsum = (__force __sum16) csum;
3339
9f6c9258 3340 if (fix > 0)
86564c3f
YM
3341 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3342 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3343
3344 else if (fix < 0)
86564c3f
YM
3345 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3346 csum_partial(t_header, -fix, 0)));
9f6c9258 3347
e2593fcd 3348 return bswab16(tsum);
9f6c9258
DK
3349}
3350
91226790 3351static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3352{
3353 u32 rc;
a848ade4
DK
3354 __u8 prot = 0;
3355 __be16 protocol;
9f6c9258
DK
3356
3357 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3358 return XMIT_PLAIN;
9f6c9258 3359
a848ade4
DK
3360 protocol = vlan_get_protocol(skb);
3361 if (protocol == htons(ETH_P_IPV6)) {
3362 rc = XMIT_CSUM_V6;
3363 prot = ipv6_hdr(skb)->nexthdr;
3364 } else {
3365 rc = XMIT_CSUM_V4;
3366 prot = ip_hdr(skb)->protocol;
3367 }
9f6c9258 3368
a848ade4
DK
3369 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3370 if (inner_ip_hdr(skb)->version == 6) {
3371 rc |= XMIT_CSUM_ENC_V6;
3372 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3373 rc |= XMIT_CSUM_TCP;
9f6c9258 3374 } else {
a848ade4
DK
3375 rc |= XMIT_CSUM_ENC_V4;
3376 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3377 rc |= XMIT_CSUM_TCP;
3378 }
3379 }
a848ade4
DK
3380 if (prot == IPPROTO_TCP)
3381 rc |= XMIT_CSUM_TCP;
9f6c9258 3382
36a8f39e
ED
3383 if (skb_is_gso(skb)) {
3384 if (skb_is_gso_v6(skb)) {
3385 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3386 if (rc & XMIT_CSUM_ENC)
3387 rc |= XMIT_GSO_ENC_V6;
3388 } else {
3389 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3390 if (rc & XMIT_CSUM_ENC)
3391 rc |= XMIT_GSO_ENC_V4;
3392 }
a848ade4 3393 }
9f6c9258
DK
3394
3395 return rc;
3396}
3397
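/* Illustrative example (not part of the driver): a CHECKSUM_PARTIAL TCP/IPv4
 * skb with GSO yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4; the same
 * payload over IPv6 yields the _V6 variants. A tunnelled skb on a non-E1x
 * chip (57712/578xx) additionally picks up the _ENC_ flags based on its
 * inner headers, and an skb that does not request hardware checksumming is
 * simply XMIT_PLAIN.
 */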
3398#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3399/* check if packet requires linearization (packet is too fragmented)
3400 no need to check fragmentation if page size > 8K (there will be no
 3401 violation of FW restrictions) */
3402static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3403 u32 xmit_type)
3404{
3405 int to_copy = 0;
3406 int hlen = 0;
3407 int first_bd_sz = 0;
3408
3409 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3410 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3411
3412 if (xmit_type & XMIT_GSO) {
3413 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3414 /* Check if LSO packet needs to be copied:
3415 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3416 int wnd_size = MAX_FETCH_BD - 3;
3417 /* Number of windows to check */
3418 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3419 int wnd_idx = 0;
3420 int frag_idx = 0;
3421 u32 wnd_sum = 0;
3422
3423 /* Headers length */
3424 if (xmit_type & XMIT_GSO_ENC)
3425 hlen = (int)(skb_inner_transport_header(skb) -
3426 skb->data) +
3427 inner_tcp_hdrlen(skb);
3428 else
3429 hlen = (int)(skb_transport_header(skb) -
3430 skb->data) + tcp_hdrlen(skb);
3431
3432 /* Amount of data (w/o headers) on linear part of SKB*/
3433 first_bd_sz = skb_headlen(skb) - hlen;
3434
3435 wnd_sum = first_bd_sz;
3436
3437 /* Calculate the first sum - it's special */
3438 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3439 wnd_sum +=
3440 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3441
3442 /* If there was data on linear skb data - check it */
3443 if (first_bd_sz > 0) {
3444 if (unlikely(wnd_sum < lso_mss)) {
3445 to_copy = 1;
3446 goto exit_lbl;
3447 }
3448
3449 wnd_sum -= first_bd_sz;
3450 }
3451
3452 /* Others are easier: run through the frag list and
3453 check all windows */
3454 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3455 wnd_sum +=
3456 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3457
3458 if (unlikely(wnd_sum < lso_mss)) {
3459 to_copy = 1;
3460 break;
3461 }
3462 wnd_sum -=
3463 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3464 }
3465 } else {
3466 /* a non-LSO packet that is too fragmented should always
3467 be linearized */
3468 to_copy = 1;
3469 }
3470 }
3471
3472exit_lbl:
3473 if (unlikely(to_copy))
3474 DP(NETIF_MSG_TX_QUEUED,
3475 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3476 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3477 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3478
3479 return to_copy;
3480}
3481#endif
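/* Illustrative standalone sketch (not driver code): the sliding-window test
 * bnx2x_pkt_req_lin() performs, reduced to a plain array of fragment sizes.
 * Every window of wnd_size consecutive buffers must carry at least one MSS
 * of payload, otherwise the packet is too fragmented for the FW and has to
 * be linearized. Function name and the example sizes are assumptions made
 * for the demo only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool example_needs_linearize(const int *frag_sz, int nr_frags,
				    int first_bd_sz, int lso_mss, int wnd_size)
{
	int wnd_sum = first_bd_sz;
	int num_wnds = nr_frags - wnd_size;
	int i;

	/* first window: linear data plus the first wnd_size - 1 frags */
	for (i = 0; i < wnd_size - 1 && i < nr_frags; i++)
		wnd_sum += frag_sz[i];

	if (first_bd_sz > 0) {
		if (wnd_sum < lso_mss)
			return true;
		wnd_sum -= first_bd_sz;
	}

	/* slide the window one fragment at a time */
	for (i = 0; i <= num_wnds; i++) {
		wnd_sum += frag_sz[i + wnd_size - 1];
		if (wnd_sum < lso_mss)
			return true;
		wnd_sum -= frag_sz[i];
	}
	return false;
}

int main(void)
{
	int frags[10] = { 150, 150, 150, 150, 150, 150, 150, 150, 150, 150 };

	/* 10 frags of 150B, window of 8: at most 1200B per window < MSS 1460 */
	printf("mss 1460: %d\n", example_needs_linearize(frags, 10, 100, 1460, 8));
	/* with MSS 1000 every window already holds a full segment */
	printf("mss 1000: %d\n", example_needs_linearize(frags, 10, 100, 1000, 8));
	return 0;
}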
3482
f2e0899f 3483/**
e8920674 3484 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3485 *
e8920674
DK
3486 * @skb: packet skb
3487 * @pbd: parse BD
3488 * @xmit_type: xmit flags
f2e0899f 3489 */
91226790
DK
3490static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3491 struct eth_tx_parse_bd_e1x *pbd,
3492 u32 xmit_type)
f2e0899f
DK
3493{
3494 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3495 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3496 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3497
3498 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3499 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3500 pbd->tcp_pseudo_csum =
86564c3f
YM
3501 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3502 ip_hdr(skb)->daddr,
3503 0, IPPROTO_TCP, 0));
057cf65e 3504 } else {
f2e0899f 3505 pbd->tcp_pseudo_csum =
86564c3f
YM
3506 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3507 &ipv6_hdr(skb)->daddr,
3508 0, IPPROTO_TCP, 0));
057cf65e 3509 }
f2e0899f 3510
86564c3f
YM
3511 pbd->global_data |=
3512 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3513}
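/* Illustrative standalone sketch (not driver code): the IPv4 pseudo-header
 * sum that csum_tcpudp_magic() folds in the function above. For LSO the
 * parsing BD is seeded with the complement of this sum computed with a zero
 * length, and the PSEUDO_CS_WITHOUT_LEN flag indicates that the per-segment
 * TCP length still has to be added. Addresses and lengths below are
 * arbitrary example values.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t example_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* ones-complement sum of the TCP/IPv4 pseudo header */
static uint16_t example_pseudo_csum(uint32_t saddr, uint32_t daddr,
				    uint8_t proto, uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;		/* the zero-padded protocol word */
	sum += tcp_len;
	return example_fold(sum);
}

int main(void)
{
	uint32_t saddr = 0xc0a80001;	/* 192.168.0.1 */
	uint32_t daddr = 0xc0a80002;	/* 192.168.0.2 */

	/* length-less seed, as programmed into the parsing BD for LSO */
	uint16_t seed = example_pseudo_csum(saddr, daddr, 6 /* TCP */, 0);
	/* the same sum for one 1460-byte segment with a 20-byte TCP header */
	uint16_t one_seg = example_pseudo_csum(saddr, daddr, 6, 1460 + 20);

	printf("seed %04x, per-segment %04x\n", seed, one_seg);
	return 0;
}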
f85582f8 3514
a848ade4
DK
3515/**
3516 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3517 *
3518 * @bp: driver handle
3519 * @skb: packet skb
3520 * @parsing_data: data to be updated
3521 * @xmit_type: xmit flags
3522 *
3523 * 57712/578xx related, when skb has encapsulation
3524 */
3525static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3526 u32 *parsing_data, u32 xmit_type)
3527{
3528 *parsing_data |=
3529 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3530 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3531 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3532
3533 if (xmit_type & XMIT_CSUM_TCP) {
3534 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3535 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3536 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3537
3538 return skb_inner_transport_header(skb) +
3539 inner_tcp_hdrlen(skb) - skb->data;
3540 }
3541
3542 /* We support checksum offload for TCP and UDP only.
3543 * No need to pass the UDP header length - it's a constant.
3544 */
3545 return skb_inner_transport_header(skb) +
3546 sizeof(struct udphdr) - skb->data;
3547}
3548
3549 /**
3550 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3551 *
3552 * @bp: driver handle
3553 * @skb: packet skb
3554 * @parsing_data: data to be updated
3555 * @xmit_type: xmit flags
3556 *
3557 * 57712/578xx related
3558 */
3559 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3560 u32 *parsing_data, u32 xmit_type)
3561 {
e39aece7 3562 *parsing_data |=
2de67439 3563 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3564 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3565 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3566
3567 if (xmit_type & XMIT_CSUM_TCP) {
3568 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3569 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3570 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3571
e39aece7 3572 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3573 }
3574 /* We support checksum offload for TCP and UDP only.
3575 * No need to pass the UDP header length - it's a constant.
3576 */
3577 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3578}
3579
a848ade4 3580/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3581static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3582 struct eth_tx_start_bd *tx_start_bd,
3583 u32 xmit_type)
93ef5c02 3584{
93ef5c02
DK
3585 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3586
a848ade4 3587 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3588 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3589
3590 if (!(xmit_type & XMIT_CSUM_TCP))
3591 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3592}
3593
3594 /**
3595 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3596 *
3597 * @bp: driver handle
3598 * @skb: packet skb
3599 * @pbd: parse BD to be updated
3600 * @xmit_type: xmit flags
3601 */
3602 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3603 struct eth_tx_parse_bd_e1x *pbd,
3604 u32 xmit_type)
3605 {
e39aece7 3606 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3607
3608 /* for now NS flag is not used in Linux */
3609 pbd->global_data =
86564c3f
YM
3610 cpu_to_le16(hlen |
3611 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3612 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3613
3614 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3615 skb_network_header(skb)) >> 1;
f2e0899f 3616
e39aece7
VZ
3617 hlen += pbd->ip_hlen_w;
3618
3619 /* We support checksum offload for TCP and UDP only */
3620 if (xmit_type & XMIT_CSUM_TCP)
3621 hlen += tcp_hdrlen(skb) / 2;
3622 else
3623 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3624
3625 pbd->total_hlen_w = cpu_to_le16(hlen);
3626 hlen = hlen*2;
3627
3628 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3629 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3630
3631 } else {
3632 s8 fix = SKB_CS_OFF(skb); /* signed! */
3633
3634 DP(NETIF_MSG_TX_QUEUED,
3635 "hlen %d fix %d csum before fix %x\n",
3636 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3637
3638 /* HW bug: fixup the CSUM */
3639 pbd->tcp_pseudo_csum =
3640 bnx2x_csum_fix(skb_transport_header(skb),
3641 SKB_CS(skb), fix);
3642
3643 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3644 pbd->tcp_pseudo_csum);
3645 }
3646
3647 return hlen;
3648}
f85582f8 3649
a848ade4
DK
3650static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3651 struct eth_tx_parse_bd_e2 *pbd_e2,
3652 struct eth_tx_parse_2nd_bd *pbd2,
3653 u16 *global_data,
3654 u32 xmit_type)
3655{
e287a75c 3656 u16 hlen_w = 0;
a848ade4 3657 u8 outerip_off, outerip_len = 0;
e768fb29 3658
e287a75c
DK
3659 /* from outer IP to transport */
3660 hlen_w = (skb_inner_transport_header(skb) -
3661 skb_network_header(skb)) >> 1;
a848ade4
DK
3662
3663 /* transport len */
e768fb29 3664 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3665
e287a75c 3666 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3667
e768fb29
DK
3668 /* outer IP header info */
3669 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3670 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3671 u32 csum = (__force u32)(~iph->check) -
3672 (__force u32)iph->tot_len -
3673 (__force u32)iph->frag_off;
c957d09f 3674
e42780b6
DK
3675 outerip_len = iph->ihl << 1;
3676
a848ade4 3677 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3678 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3679 } else {
3680 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3681 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
e42780b6 3682 pbd_e2->data.tunnel_data.flags |=
05f8461b 3683 ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
a848ade4
DK
3684 }
3685
3686 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3687
3688 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3689
e42780b6
DK
3690 /* inner IP header info */
3691 if (xmit_type & XMIT_CSUM_ENC_V4) {
e287a75c 3692 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3693
3694 pbd_e2->data.tunnel_data.pseudo_csum =
3695 bswab16(~csum_tcpudp_magic(
3696 inner_ip_hdr(skb)->saddr,
3697 inner_ip_hdr(skb)->daddr,
3698 0, IPPROTO_TCP, 0));
a848ade4
DK
3699 } else {
3700 pbd_e2->data.tunnel_data.pseudo_csum =
3701 bswab16(~csum_ipv6_magic(
3702 &inner_ipv6_hdr(skb)->saddr,
3703 &inner_ipv6_hdr(skb)->daddr,
3704 0, IPPROTO_TCP, 0));
3705 }
3706
3707 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3708
3709 *global_data |=
3710 outerip_off |
a848ade4
DK
3711 (outerip_len <<
3712 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3713 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3714 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3715
3716 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3717 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3718 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3719 }
a848ade4
DK
3720}
3721
e42780b6
DK
3722static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3723 u32 xmit_type)
3724{
3725 struct ipv6hdr *ipv6;
3726
3727 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3728 return;
3729
3730 if (xmit_type & XMIT_GSO_ENC_V6)
3731 ipv6 = inner_ipv6_hdr(skb);
3732 else /* XMIT_GSO_V6 */
3733 ipv6 = ipv6_hdr(skb);
3734
3735 if (ipv6->nexthdr == NEXTHDR_IPV6)
3736 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3737}
3738
9f6c9258
DK
3739/* called with netif_tx_lock
3740 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3741 * netif_wake_queue()
3742 */
3743netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3744{
3745 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3746
9f6c9258 3747 struct netdev_queue *txq;
6383c0b3 3748 struct bnx2x_fp_txdata *txdata;
9f6c9258 3749 struct sw_tx_bd *tx_buf;
619c5cb6 3750 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3751 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3752 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3753 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3754 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3755 u32 pbd_e2_parsing_data = 0;
9f6c9258 3756 u16 pkt_prod, bd_prod;
65565884 3757 int nbd, txq_index;
9f6c9258
DK
3758 dma_addr_t mapping;
3759 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3760 int i;
3761 u8 hlen = 0;
3762 __le16 pkt_size = 0;
3763 struct ethhdr *eth;
3764 u8 mac_type = UNICAST_ADDRESS;
3765
3766#ifdef BNX2X_STOP_ON_ERROR
3767 if (unlikely(bp->panic))
3768 return NETDEV_TX_BUSY;
3769#endif
3770
6383c0b3
AE
3771 txq_index = skb_get_queue_mapping(skb);
3772 txq = netdev_get_tx_queue(dev, txq_index);
3773
55c11941 3774 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3775
65565884 3776 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3777
3778 /* enable this debug print to view the transmission queue being used
51c1a580 3779 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3780 txq_index, fp_index, txdata_index); */
9f6c9258 3781
16a5fd92 3782 /* enable this debug print to view the transmission details
51c1a580
MS
3783 DP(NETIF_MSG_TX_QUEUED,
3784 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3785 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3786
6383c0b3 3787 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3788 skb_shinfo(skb)->nr_frags +
3789 BDS_PER_TX_PKT +
3790 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3791 /* Handle special storage cases separately */
c96bdc0c
DK
3792 if (txdata->tx_ring_size == 0) {
3793 struct bnx2x_eth_q_stats *q_stats =
3794 bnx2x_fp_qstats(bp, txdata->parent_fp);
3795 q_stats->driver_filtered_tx_pkt++;
3796 dev_kfree_skb(skb);
3797 return NETDEV_TX_OK;
3798 }
2de67439
YM
3799 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3800 netif_tx_stop_queue(txq);
c96bdc0c 3801 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3802
9f6c9258
DK
3803 return NETDEV_TX_BUSY;
3804 }
3805
51c1a580 3806 DP(NETIF_MSG_TX_QUEUED,
04c46736 3807 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3808 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3809 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3810 skb->len);
9f6c9258
DK
3811
3812 eth = (struct ethhdr *)skb->data;
3813
3814 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3815 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3816 if (is_broadcast_ether_addr(eth->h_dest))
3817 mac_type = BROADCAST_ADDRESS;
3818 else
3819 mac_type = MULTICAST_ADDRESS;
3820 }
3821
91226790 3822#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3823 /* First, check if we need to linearize the skb (due to FW
3824 restrictions). No need to check fragmentation if page size > 8K
3825 (there will be no violation of FW restrictions) */
3826 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3827 /* Statistics of linearization */
3828 bp->lin_cnt++;
3829 if (skb_linearize(skb) != 0) {
51c1a580
MS
3830 DP(NETIF_MSG_TX_QUEUED,
3831 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3832 dev_kfree_skb_any(skb);
3833 return NETDEV_TX_OK;
3834 }
3835 }
3836#endif
619c5cb6
VZ
3837 /* Map skb linear data for DMA */
3838 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3839 skb_headlen(skb), DMA_TO_DEVICE);
3840 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3841 DP(NETIF_MSG_TX_QUEUED,
3842 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3843 dev_kfree_skb_any(skb);
3844 return NETDEV_TX_OK;
3845 }
3846 /*
3847 Please read carefully. First we use one BD which we mark as start,
3848 then we have a parsing info BD (used for TSO or xsum),
3849 and only then we have the rest of the TSO BDs.
3850 (don't forget to mark the last one as last,
3851 and to unmap only AFTER you write to the BD ...)
3852 And above all, all PBD sizes are in words - NOT DWORDS!
3853 */
3854
619c5cb6
VZ
3855 /* get current pkt produced now - advance it just before sending packet
3856 * since mapping of pages may fail and cause packet to be dropped
3857 */
6383c0b3
AE
3858 pkt_prod = txdata->tx_pkt_prod;
3859 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3860
619c5cb6
VZ
3861 /* get a tx_buf and first BD
3862 * tx_start_bd may be changed during SPLIT,
3863 * but first_bd will always stay first
3864 */
6383c0b3
AE
3865 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3866 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3867 first_bd = tx_start_bd;
9f6c9258
DK
3868
3869 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3870
eeed018c
MK
3871 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3872 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3873 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3874 } else if (bp->ptp_tx_skb) {
3875 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3876 } else {
3877 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3878 /* schedule check for Tx timestamp */
3879 bp->ptp_tx_skb = skb_get(skb);
3880 bp->ptp_tx_start = jiffies;
3881 schedule_work(&bp->ptp_task);
3882 }
3883 }
3884
91226790
DK
3885 /* header nbd: indirectly zero other flags! */
3886 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3887
3888 /* remember the first BD of the packet */
6383c0b3 3889 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3890 tx_buf->skb = skb;
3891 tx_buf->flags = 0;
3892
3893 DP(NETIF_MSG_TX_QUEUED,
3894 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3895 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3896
df8a39de 3897 if (skb_vlan_tag_present(skb)) {
523224a3 3898 tx_start_bd->vlan_or_ethertype =
df8a39de 3899 cpu_to_le16(skb_vlan_tag_get(skb));
523224a3
DK
3900 tx_start_bd->bd_flags.as_bitfield |=
3901 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3902 } else {
3903 /* when transmitting in a vf, start bd must hold the ethertype
3904 * for fw to enforce it
3905 */
ea36475a 3906#ifndef BNX2X_STOP_ON_ERROR
91226790 3907 if (IS_VF(bp))
ea36475a 3908#endif
dc1ba591
AE
3909 tx_start_bd->vlan_or_ethertype =
3910 cpu_to_le16(ntohs(eth->h_proto));
ea36475a 3911#ifndef BNX2X_STOP_ON_ERROR
91226790 3912 else
dc1ba591
AE
3913 /* used by FW for packet accounting */
3914 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
ea36475a 3915#endif
dc1ba591 3916 }
9f6c9258 3917
91226790
DK
3918 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3919
9f6c9258
DK
3920 /* turn on parsing and get a BD */
3921 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3922
93ef5c02
DK
3923 if (xmit_type & XMIT_CSUM)
3924 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3925
619c5cb6 3926 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3927 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3928 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3929
3930 if (xmit_type & XMIT_CSUM_ENC) {
3931 u16 global_data = 0;
3932
3933 /* Set PBD in enc checksum offload case */
3934 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3935 &pbd_e2_parsing_data,
3936 xmit_type);
3937
3938 /* turn on 2nd parsing and get a BD */
3939 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3940
3941 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3942
3943 memset(pbd2, 0, sizeof(*pbd2));
3944
3945 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3946 (skb_inner_network_header(skb) -
3947 skb->data) >> 1;
3948
3949 if (xmit_type & XMIT_GSO_ENC)
3950 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3951 &global_data,
3952 xmit_type);
3953
3954 pbd2->global_data = cpu_to_le16(global_data);
3955
3956 /* add addition parse BD indication to start BD */
3957 SET_FLAG(tx_start_bd->general_data,
3958 ETH_TX_START_BD_PARSE_NBDS, 1);
3959 /* set encapsulation flag in start BD */
3960 SET_FLAG(tx_start_bd->general_data,
3961 ETH_TX_START_BD_TUNNEL_EXIST, 1);
fe26566d
DK
3962
3963 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3964
a848ade4
DK
3965 nbd++;
3966 } else if (xmit_type & XMIT_CSUM) {
91226790 3967 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3968 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3969 &pbd_e2_parsing_data,
3970 xmit_type);
a848ade4 3971 }
dc1ba591 3972
e42780b6 3973 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
babe723d
YM
3974 /* Add the macs to the parsing BD if this is a vf or if
3975 * Tx Switching is enabled.
3976 */
91226790
DK
3977 if (IS_VF(bp)) {
3978 /* override GRE parameters in BD */
3979 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3980 &pbd_e2->data.mac_addr.src_mid,
3981 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3982 eth->h_source);
91226790 3983
babe723d
YM
3984 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3985 &pbd_e2->data.mac_addr.dst_mid,
3986 &pbd_e2->data.mac_addr.dst_lo,
3987 eth->h_dest);
ea36475a
YM
3988 } else {
3989 if (bp->flags & TX_SWITCHING)
3990 bnx2x_set_fw_mac_addr(
3991 &pbd_e2->data.mac_addr.dst_hi,
3992 &pbd_e2->data.mac_addr.dst_mid,
3993 &pbd_e2->data.mac_addr.dst_lo,
3994 eth->h_dest);
3995#ifdef BNX2X_STOP_ON_ERROR
3996 /* Enforce security is always set in Stop on Error -
3997 * source mac should be present in the parsing BD
3998 */
3999 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4000 &pbd_e2->data.mac_addr.src_mid,
4001 &pbd_e2->data.mac_addr.src_lo,
4002 eth->h_source);
4003#endif
619c5cb6 4004 }
96bed4b9
YM
4005
4006 SET_FLAG(pbd_e2_parsing_data,
4007 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 4008 } else {
96bed4b9 4009 u16 global_data = 0;
6383c0b3 4010 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
4011 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4012 /* Set PBD in checksum offload case */
4013 if (xmit_type & XMIT_CSUM)
4014 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 4015
96bed4b9
YM
4016 SET_FLAG(global_data,
4017 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4018 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
4019 }
4020
f85582f8 4021 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
4022 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4023 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
4024 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4025 pkt_size = tx_start_bd->nbytes;
4026
51c1a580 4027 DP(NETIF_MSG_TX_QUEUED,
91226790 4028 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 4029 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 4030 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
4031 tx_start_bd->bd_flags.as_bitfield,
4032 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
4033
4034 if (xmit_type & XMIT_GSO) {
4035
4036 DP(NETIF_MSG_TX_QUEUED,
4037 "TSO packet len %d hlen %d total len %d tso size %d\n",
4038 skb->len, hlen, skb_headlen(skb),
4039 skb_shinfo(skb)->gso_size);
4040
4041 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4042
91226790
DK
4043 if (unlikely(skb_headlen(skb) > hlen)) {
4044 nbd++;
6383c0b3
AE
4045 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4046 &tx_start_bd, hlen,
91226790
DK
4047 bd_prod);
4048 }
619c5cb6 4049 if (!CHIP_IS_E1x(bp))
e42780b6
DK
4050 pbd_e2_parsing_data |=
4051 (skb_shinfo(skb)->gso_size <<
4052 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4053 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f 4054 else
e42780b6 4055 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 4056 }
2297a2da
VZ
4057
4058 /* Set the PBD's parsing_data field if not zero
4059 * (for the chips newer than 57711).
4060 */
4061 if (pbd_e2_parsing_data)
4062 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4063
9f6c9258
DK
4064 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4065
f85582f8 4066 /* Handle fragmented skb */
9f6c9258
DK
4067 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4068 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4069
9e903e08
ED
4070 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4071 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 4072 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 4073 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 4074
51c1a580
MS
4075 DP(NETIF_MSG_TX_QUEUED,
4076 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
4077
4078 /* we need unmap all buffers already mapped
4079 * for this SKB;
4080 * first_bd->nbd need to be properly updated
4081 * before call to bnx2x_free_tx_pkt
4082 */
4083 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 4084 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
4085 TX_BD(txdata->tx_pkt_prod),
4086 &pkts_compl, &bytes_compl);
619c5cb6
VZ
4087 return NETDEV_TX_OK;
4088 }
4089
9f6c9258 4090 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 4091 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4092 if (total_pkt_bd == NULL)
6383c0b3 4093 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4094
9f6c9258
DK
4095 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4096 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
4097 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4098 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 4099 nbd++;
9f6c9258
DK
4100
4101 DP(NETIF_MSG_TX_QUEUED,
4102 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4103 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4104 le16_to_cpu(tx_data_bd->nbytes));
4105 }
4106
4107 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4108
619c5cb6
VZ
4109 /* update with actual num BDs */
4110 first_bd->nbd = cpu_to_le16(nbd);
4111
9f6c9258
DK
4112 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4113
4114 /* now send a tx doorbell, counting the next BD
4115 * if the packet contains or ends with it
4116 */
4117 if (TX_BD_POFF(bd_prod) < nbd)
4118 nbd++;
4119
4120 /* total_pkt_bytes should be set on the first data BD if
4121 * it's not an LSO packet and there is more than one
4122 * data BD. In this case pkt_size is limited by the MTU value.
4123 * However, we prefer to set it for an LSO packet as well (while
4124 * we don't have to) in order to save some CPU cycles in the
4125 * non-LSO case, where we care much more about them.
4126 */
4127 if (total_pkt_bd != NULL)
4128 total_pkt_bd->total_pkt_bytes = pkt_size;
4129
523224a3 4130 if (pbd_e1x)
9f6c9258 4131 DP(NETIF_MSG_TX_QUEUED,
51c1a580 4132 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
4133 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4134 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4135 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4136 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
4137 if (pbd_e2)
4138 DP(NETIF_MSG_TX_QUEUED,
4139 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
4140 pbd_e2,
4141 pbd_e2->data.mac_addr.dst_hi,
4142 pbd_e2->data.mac_addr.dst_mid,
4143 pbd_e2->data.mac_addr.dst_lo,
4144 pbd_e2->data.mac_addr.src_hi,
4145 pbd_e2->data.mac_addr.src_mid,
4146 pbd_e2->data.mac_addr.src_lo,
f2e0899f 4147 pbd_e2->parsing_data);
9f6c9258
DK
4148 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4149
2df1a70a
TH
4150 netdev_tx_sent_queue(txq, skb->len);
4151
8373c57d
WB
4152 skb_tx_timestamp(skb);
4153
6383c0b3 4154 txdata->tx_pkt_prod++;
4155 /*
4156 * Make sure that the BD data is updated before updating the producer
4157 * since FW might read the BD right after the producer is updated.
4158 * This is only applicable for weak-ordered memory model archs such
4159 * as IA-64. The following barrier is also mandatory since the FW
4160 * assumes packets must have BDs.
4161 */
4162 wmb();
4163
6383c0b3 4164 txdata->tx_db.data.prod += nbd;
9f6c9258 4165 barrier();
f85582f8 4166
6383c0b3 4167 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4168
4169 mmiowb();
4170
6383c0b3 4171 txdata->tx_bd_prod += nbd;
9f6c9258 4172
7df2dc6b 4173 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4174 netif_tx_stop_queue(txq);
4175
4176 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4177 * ordering of set_bit() in netif_tx_stop_queue() and read of
4178 * fp->bd_tx_cons */
4179 smp_mb();
4180
15192a8c 4181 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4182 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4183 netif_tx_wake_queue(txq);
4184 }
6383c0b3 4185 txdata->tx_pkt++;
9f6c9258
DK
4186
4187 return NETDEV_TX_OK;
4188}
f85582f8 4189
6383c0b3
AE
4190/**
4191 * bnx2x_setup_tc - routine to configure net_device for multi tc
4192 *
4193 * @netdev: net device to configure
4194 * @tc: number of traffic classes to enable
4195 *
4196 * callback connected to the ndo_setup_tc function pointer
4197 */
4198int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4199{
4200 int cos, prio, count, offset;
4201 struct bnx2x *bp = netdev_priv(dev);
4202
4203 /* setup tc must be called under rtnl lock */
4204 ASSERT_RTNL();
4205
16a5fd92 4206 /* no traffic classes requested. Aborting */
6383c0b3
AE
4207 if (!num_tc) {
4208 netdev_reset_tc(dev);
4209 return 0;
4210 }
4211
4212 /* requested to support too many traffic classes */
4213 if (num_tc > bp->max_cos) {
6bf07b8e 4214 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4215 num_tc, bp->max_cos);
6383c0b3
AE
4216 return -EINVAL;
4217 }
4218
4219 /* declare amount of supported traffic classes */
4220 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4221 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4222 return -EINVAL;
4223 }
4224
4225 /* configure priority to traffic class mapping */
4226 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4227 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4228 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4229 "mapping priority %d to tc %d\n",
6383c0b3
AE
4230 prio, bp->prio_to_cos[prio]);
4231 }
4232
4233 /* Use this configuration to differentiate tc0 from other COSes.
4234 This can be used for ets or pfc, and save the effort of setting
4235 up a multi-class queue disc or negotiating DCBX with a switch
4236 netdev_set_prio_tc_map(dev, 0, 0);
4237 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4238 for (prio = 1; prio < 16; prio++) {
4239 netdev_set_prio_tc_map(dev, prio, 1);
4240 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4241 } */
4242
4243 /* configure traffic class to transmission queue mapping */
4244 for (cos = 0; cos < bp->max_cos; cos++) {
4245 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4246 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4247 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4248 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4249 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4250 cos, offset, count);
4251 }
4252
4253 return 0;
4254}
4255
9f6c9258
DK
4256/* called with rtnl_lock */
4257int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4258{
4259 struct sockaddr *addr = p;
4260 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4261 int rc = 0;
9f6c9258 4262
2e98ffc2 4263 if (!is_valid_ether_addr(addr->sa_data)) {
51c1a580 4264 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4265 return -EINVAL;
51c1a580 4266 }
614c76df 4267
2e98ffc2
DK
4268 if (IS_MF_STORAGE_ONLY(bp)) {
4269 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
9f6c9258 4270 return -EINVAL;
51c1a580 4271 }
9f6c9258 4272
619c5cb6
VZ
4273 if (netif_running(dev)) {
4274 rc = bnx2x_set_eth_mac(bp, false);
4275 if (rc)
4276 return rc;
4277 }
4278
9f6c9258 4279 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4280
523224a3 4281 if (netif_running(dev))
619c5cb6 4282 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4283
619c5cb6 4284 return rc;
9f6c9258
DK
4285}
4286
b3b83c3f
DK
4287static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4288{
4289 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4290 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4291 u8 cos;
b3b83c3f
DK
4292
4293 /* Common */
55c11941 4294
b3b83c3f
DK
4295 if (IS_FCOE_IDX(fp_index)) {
4296 memset(sb, 0, sizeof(union host_hc_status_block));
4297 fp->status_blk_mapping = 0;
b3b83c3f 4298 } else {
b3b83c3f 4299 /* status blocks */
619c5cb6 4300 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4301 BNX2X_PCI_FREE(sb->e2_sb,
4302 bnx2x_fp(bp, fp_index,
4303 status_blk_mapping),
4304 sizeof(struct host_hc_status_block_e2));
4305 else
4306 BNX2X_PCI_FREE(sb->e1x_sb,
4307 bnx2x_fp(bp, fp_index,
4308 status_blk_mapping),
4309 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4310 }
55c11941 4311
b3b83c3f
DK
4312 /* Rx */
4313 if (!skip_rx_queue(bp, fp_index)) {
4314 bnx2x_free_rx_bds(fp);
4315
4316 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4317 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4318 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4319 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4320 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4321
4322 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4323 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4324 sizeof(struct eth_fast_path_rx_cqe) *
4325 NUM_RCQ_BD);
4326
4327 /* SGE ring */
4328 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4329 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4330 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4331 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4332 }
4333
4334 /* Tx */
4335 if (!skip_tx_queue(bp, fp_index)) {
4336 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4337 for_each_cos_in_tx_queue(fp, cos) {
65565884 4338 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4339
51c1a580 4340 DP(NETIF_MSG_IFDOWN,
94f05b0f 4341 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4342 fp_index, cos, txdata->cid);
4343
4344 BNX2X_FREE(txdata->tx_buf_ring);
4345 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4346 txdata->tx_desc_mapping,
4347 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4348 }
b3b83c3f
DK
4349 }
4350 /* end of fastpath */
4351}
4352
a8f47eb7 4353static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4354{
4355 int i;
4356 for_each_cnic_queue(bp, i)
4357 bnx2x_free_fp_mem_at(bp, i);
4358}
4359
b3b83c3f
DK
4360void bnx2x_free_fp_mem(struct bnx2x *bp)
4361{
4362 int i;
55c11941 4363 for_each_eth_queue(bp, i)
b3b83c3f
DK
4364 bnx2x_free_fp_mem_at(bp, i);
4365}
4366
1191cb83 4367static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4368{
4369 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4370 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4371 bnx2x_fp(bp, index, sb_index_values) =
4372 (__le16 *)status_blk.e2_sb->sb.index_values;
4373 bnx2x_fp(bp, index, sb_running_index) =
4374 (__le16 *)status_blk.e2_sb->sb.running_index;
4375 } else {
4376 bnx2x_fp(bp, index, sb_index_values) =
4377 (__le16 *)status_blk.e1x_sb->sb.index_values;
4378 bnx2x_fp(bp, index, sb_running_index) =
4379 (__le16 *)status_blk.e1x_sb->sb.running_index;
4380 }
4381}
4382
1191cb83
ED
4383/* Returns the number of actually allocated BDs */
4384static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4385 int rx_ring_size)
4386{
4387 struct bnx2x *bp = fp->bp;
4388 u16 ring_prod, cqe_ring_prod;
4389 int i, failure_cnt = 0;
4390
4391 fp->rx_comp_cons = 0;
4392 cqe_ring_prod = ring_prod = 0;
4393
4394 /* This routine is called only during fp init, so
4395 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4396 */
4397 for (i = 0; i < rx_ring_size; i++) {
996dedba 4398 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4399 failure_cnt++;
4400 continue;
4401 }
4402 ring_prod = NEXT_RX_IDX(ring_prod);
4403 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4404 WARN_ON(ring_prod <= (i - failure_cnt));
4405 }
4406
4407 if (failure_cnt)
4408 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4409 i - failure_cnt, fp->index);
4410
4411 fp->rx_bd_prod = ring_prod;
4412 /* Limit the CQE producer by the CQE ring size */
4413 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4414 cqe_ring_prod);
4415 fp->rx_pkt = fp->rx_calls = 0;
4416
15192a8c 4417 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4418
4419 return i - failure_cnt;
4420}
4421
4422static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4423{
4424 int i;
4425
4426 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4427 struct eth_rx_cqe_next_page *nextpg;
4428
4429 nextpg = (struct eth_rx_cqe_next_page *)
4430 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4431 nextpg->addr_hi =
4432 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4434 nextpg->addr_lo =
4435 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4436 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4437 }
4438}
4439
b3b83c3f
DK
4440static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4441{
4442 union host_hc_status_block *sb;
4443 struct bnx2x_fastpath *fp = &bp->fp[index];
4444 int ring_size = 0;
6383c0b3 4445 u8 cos;
c2188952 4446 int rx_ring_size = 0;
b3b83c3f 4447
2e98ffc2 4448 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
614c76df
DK
4449 rx_ring_size = MIN_RX_SIZE_NONTPA;
4450 bp->rx_ring_size = rx_ring_size;
55c11941 4451 } else if (!bp->rx_ring_size) {
c2188952
VZ
4452 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4453
065f8b92
YM
4454 if (CHIP_IS_E3(bp)) {
4455 u32 cfg = SHMEM_RD(bp,
4456 dev_info.port_hw_config[BP_PORT(bp)].
4457 default_cfg);
4458
4459 /* Decrease ring size for 1G functions */
4460 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4461 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4462 rx_ring_size /= 10;
4463 }
d760fc37 4464
c2188952
VZ
4465 /* allocate at least number of buffers required by FW */
4466 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4467 MIN_RX_SIZE_TPA, rx_ring_size);
4468
4469 bp->rx_ring_size = rx_ring_size;
614c76df 4470 } else /* if rx_ring_size specified - use it */
c2188952 4471 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4472
04c46736
YM
4473 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4474
b3b83c3f
DK
4475 /* Common */
4476 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4477
b3b83c3f 4478 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4479 /* status blocks */
cd2b0389
JP
4480 if (!CHIP_IS_E1x(bp)) {
4481 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4482 sizeof(struct host_hc_status_block_e2));
4483 if (!sb->e2_sb)
4484 goto alloc_mem_err;
4485 } else {
4486 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4487 sizeof(struct host_hc_status_block_e1x));
4488 if (!sb->e1x_sb)
4489 goto alloc_mem_err;
4490 }
b3b83c3f 4491 }
8eef2af1
DK
4492
4493 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4494 * set shortcuts for it.
4495 */
4496 if (!IS_FCOE_IDX(index))
4497 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4498
4499 /* Tx */
4500 if (!skip_tx_queue(bp, index)) {
4501 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4502 for_each_cos_in_tx_queue(fp, cos) {
65565884 4503 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4504
51c1a580
MS
4505 DP(NETIF_MSG_IFUP,
4506 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4507 index, cos);
4508
cd2b0389
JP
4509 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4510 sizeof(struct sw_tx_bd),
4511 GFP_KERNEL);
4512 if (!txdata->tx_buf_ring)
4513 goto alloc_mem_err;
4514 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4515 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4516 if (!txdata->tx_desc_ring)
4517 goto alloc_mem_err;
6383c0b3 4518 }
b3b83c3f
DK
4519 }
4520
4521 /* Rx */
4522 if (!skip_rx_queue(bp, index)) {
4523 /* fastpath rx rings: rx_buf rx_desc rx_comp */
cd2b0389
JP
4524 bnx2x_fp(bp, index, rx_buf_ring) =
4525 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4526 if (!bnx2x_fp(bp, index, rx_buf_ring))
4527 goto alloc_mem_err;
4528 bnx2x_fp(bp, index, rx_desc_ring) =
4529 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4530 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4531 if (!bnx2x_fp(bp, index, rx_desc_ring))
4532 goto alloc_mem_err;
b3b83c3f 4533
75b29459 4534 /* Seed all CQEs by 1s */
cd2b0389
JP
4535 bnx2x_fp(bp, index, rx_comp_ring) =
4536 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4537 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4538 if (!bnx2x_fp(bp, index, rx_comp_ring))
4539 goto alloc_mem_err;
b3b83c3f
DK
4540
4541 /* SGE ring */
cd2b0389
JP
4542 bnx2x_fp(bp, index, rx_page_ring) =
4543 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4544 GFP_KERNEL);
4545 if (!bnx2x_fp(bp, index, rx_page_ring))
4546 goto alloc_mem_err;
4547 bnx2x_fp(bp, index, rx_sge_ring) =
4548 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4549 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4550 if (!bnx2x_fp(bp, index, rx_sge_ring))
4551 goto alloc_mem_err;
b3b83c3f
DK
4552 /* RX BD ring */
4553 bnx2x_set_next_page_rx_bd(fp);
4554
4555 /* CQ ring */
4556 bnx2x_set_next_page_rx_cq(fp);
4557
4558 /* BDs */
4559 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4560 if (ring_size < rx_ring_size)
4561 goto alloc_mem_err;
4562 }
4563
4564 return 0;
4565
4566/* handles low memory cases */
4567alloc_mem_err:
4568 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4569 index, ring_size);
4570 /* FW will drop all packets if the queue is not big enough.
4571 * In these cases we disable the queue.
4572 * Min size is different for OOO, TPA and non-TPA queues.
4573 */
7e6b4d44 4574 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
eb722d7a 4575 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4576 /* release memory allocated for this queue */
4577 bnx2x_free_fp_mem_at(bp, index);
4578 return -ENOMEM;
4579 }
4580 return 0;
4581}
4582
a8f47eb7 4583static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4584{
4585 if (!NO_FCOE(bp))
4586 /* FCoE */
4587 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4588 /* we will fail load process instead of mark
4589 * NO_FCOE_FLAG
4590 */
4591 return -ENOMEM;
4592
4593 return 0;
4594}
4595
a8f47eb7 4596static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
b3b83c3f
DK
4597{
4598 int i;
4599
55c11941
MS
4600 /* 1. Allocate FP for leading - fatal if error
4601 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4602 */
4603
4604 /* leading */
4605 if (bnx2x_alloc_fp_mem_at(bp, 0))
4606 return -ENOMEM;
6383c0b3 4607
b3b83c3f
DK
4608 /* RSS */
4609 for_each_nondefault_eth_queue(bp, i)
4610 if (bnx2x_alloc_fp_mem_at(bp, i))
4611 break;
4612
4613 /* handle memory failures */
4614 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4615 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4616
4617 WARN_ON(delta < 0);
4864a16a 4618 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4619 if (CNIC_SUPPORT(bp))
4620 /* move non eth FPs next to last eth FP
4621 * must be done in that order
4622 * FCOE_IDX < FWD_IDX < OOO_IDX
4623 */
b3b83c3f 4624
55c11941
MS
4625 /* move FCoE fp even NO_FCOE_FLAG is on */
4626 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4627 bp->num_ethernet_queues -= delta;
4628 bp->num_queues = bp->num_ethernet_queues +
4629 bp->num_cnic_queues;
b3b83c3f
DK
4630 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4631 bp->num_queues + delta, bp->num_queues);
4632 }
4633
4634 return 0;
4635}
d6214d7a 4636
523224a3
DK
4637void bnx2x_free_mem_bp(struct bnx2x *bp)
4638{
c3146eb6
DK
4639 int i;
4640
4641 for (i = 0; i < bp->fp_array_size; i++)
4642 kfree(bp->fp[i].tpa_info);
523224a3 4643 kfree(bp->fp);
15192a8c
BW
4644 kfree(bp->sp_objs);
4645 kfree(bp->fp_stats);
65565884 4646 kfree(bp->bnx2x_txq);
523224a3
DK
4647 kfree(bp->msix_table);
4648 kfree(bp->ilt);
4649}
4650
0329aba1 4651int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4652{
4653 struct bnx2x_fastpath *fp;
4654 struct msix_entry *tbl;
4655 struct bnx2x_ilt *ilt;
6383c0b3 4656 int msix_table_size = 0;
55c11941 4657 int fp_array_size, txq_array_size;
15192a8c 4658 int i;
6383c0b3
AE
4659
4660 /*
4661 * The biggest MSI-X table we might need is the maximum number of fast
4662 * path IGU SBs plus the default SB (for PF only).
4663 */
1ab4434c
AE
4664 msix_table_size = bp->igu_sb_cnt;
4665 if (IS_PF(bp))
4666 msix_table_size++;
4667 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4668
6383c0b3 4669 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4670 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4671 bp->fp_array_size = fp_array_size;
4672 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4673
c3146eb6 4674 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4675 if (!fp)
4676 goto alloc_err;
c3146eb6 4677 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4678 fp[i].tpa_info =
4679 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4680 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4681 if (!(fp[i].tpa_info))
4682 goto alloc_err;
4683 }
4684
523224a3
DK
4685 bp->fp = fp;
4686
15192a8c 4687 /* allocate sp objs */
c3146eb6 4688 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4689 GFP_KERNEL);
4690 if (!bp->sp_objs)
4691 goto alloc_err;
4692
4693 /* allocate fp_stats */
c3146eb6 4694 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4695 GFP_KERNEL);
4696 if (!bp->fp_stats)
4697 goto alloc_err;
4698
65565884 4699 /* Allocate memory for the transmission queues array */
55c11941
MS
4700 txq_array_size =
4701 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4702 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4703
4704 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4705 GFP_KERNEL);
65565884
MS
4706 if (!bp->bnx2x_txq)
4707 goto alloc_err;
4708
523224a3 4709 /* msix table */
01e23742 4710 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4711 if (!tbl)
4712 goto alloc_err;
4713 bp->msix_table = tbl;
4714
4715 /* ilt */
4716 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4717 if (!ilt)
4718 goto alloc_err;
4719 bp->ilt = ilt;
4720
4721 return 0;
4722alloc_err:
4723 bnx2x_free_mem_bp(bp);
4724 return -ENOMEM;
523224a3
DK
4725}
4726
a9fccec7 4727int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4728{
4729 struct bnx2x *bp = netdev_priv(dev);
4730
4731 if (unlikely(!netif_running(dev)))
4732 return 0;
4733
5d07d868 4734 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4735 return bnx2x_nic_load(bp, LOAD_NORMAL);
4736}
4737
1ac9e428
YR
4738int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4739{
4740 u32 sel_phy_idx = 0;
4741 if (bp->link_params.num_phys <= 1)
4742 return INT_PHY;
4743
4744 if (bp->link_vars.link_up) {
4745 sel_phy_idx = EXT_PHY1;
4746 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4747 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4748 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4749 sel_phy_idx = EXT_PHY2;
4750 } else {
4751
4752 switch (bnx2x_phy_selection(&bp->link_params)) {
4753 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4754 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4755 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4756 sel_phy_idx = EXT_PHY1;
4757 break;
4758 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4759 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4760 sel_phy_idx = EXT_PHY2;
4761 break;
4762 }
4763 }
4764
4765 return sel_phy_idx;
1ac9e428
YR
4766}
4767int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4768{
4769 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4770 /*
4771 * The selected active PHY index is always the one after swapping (in
4772 * case PHY swapping is enabled). So when swapping is enabled, we need
4773 * to reverse the configuration.
4774 */
4775
4776 if (bp->link_params.multi_phy_config &
4777 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4778 if (sel_phy_idx == EXT_PHY1)
4779 sel_phy_idx = EXT_PHY2;
4780 else if (sel_phy_idx == EXT_PHY2)
4781 sel_phy_idx = EXT_PHY1;
4782 }
4783 return LINK_CONFIG_IDX(sel_phy_idx);
4784}
4785
55c11941 4786#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4787int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4788{
4789 struct bnx2x *bp = netdev_priv(dev);
4790 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4791
4792 switch (type) {
4793 case NETDEV_FCOE_WWNN:
4794 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4795 cp->fcoe_wwn_node_name_lo);
4796 break;
4797 case NETDEV_FCOE_WWPN:
4798 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4799 cp->fcoe_wwn_port_name_lo);
4800 break;
4801 default:
51c1a580 4802 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4803 return -EINVAL;
4804 }
4805
4806 return 0;
4807}
4808#endif
4809
9f6c9258
DK
4810/* called with rtnl_lock */
4811int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4812{
4813 struct bnx2x *bp = netdev_priv(dev);
9f6c9258 4814
0650c0b8
YM
4815 if (pci_num_vf(bp->pdev)) {
4816 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4817 return -EPERM;
4818 }
4819
9f6c9258 4820 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4821 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4822 return -EAGAIN;
4823 }
4824
4825 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4826 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4827 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4828 return -EINVAL;
51c1a580 4829 }
9f6c9258
DK
4830
4831 /* This does not race with packet allocation
4832 * because the actual alloc size is
4833 * only updated as part of load
4834 */
4835 dev->mtu = new_mtu;
4836
66371c44
MM
4837 return bnx2x_reload_if_running(dev);
4838}
4839
c8f44aff 4840netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4841 netdev_features_t features)
66371c44
MM
4842{
4843 struct bnx2x *bp = netdev_priv(dev);
4844
909d9faa
YM
4845 if (pci_num_vf(bp->pdev)) {
4846 netdev_features_t changed = dev->features ^ features;
4847
4848 /* Revert the requested changes in features if they
4849 * would require internal reload of PF in bnx2x_set_features().
4850 */
4851 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4852 features &= ~NETIF_F_RXCSUM;
4853 features |= dev->features & NETIF_F_RXCSUM;
4854 }
4855
4856 if (changed & NETIF_F_LOOPBACK) {
4857 features &= ~NETIF_F_LOOPBACK;
4858 features |= dev->features & NETIF_F_LOOPBACK;
4859 }
4860 }
4861
66371c44 4862 /* TPA requires Rx CSUM offloading */
aebf6244 4863 if (!(features & NETIF_F_RXCSUM)) {
66371c44 4864 features &= ~NETIF_F_LRO;
621b4d66
DK
4865 features &= ~NETIF_F_GRO;
4866 }
66371c44
MM
4867
4868 return features;
4869}
4870
c8f44aff 4871int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4872{
4873 struct bnx2x *bp = netdev_priv(dev);
f8dcb5e3 4874 netdev_features_t changes = features ^ dev->features;
538dd2e3 4875 bool bnx2x_reload = false;
f8dcb5e3 4876 int rc;
621b4d66 4877
909d9faa
YM
4878 /* VFs or non SRIOV PFs should be able to change loopback feature */
4879 if (!pci_num_vf(bp->pdev)) {
4880 if (features & NETIF_F_LOOPBACK) {
4881 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4882 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4883 bnx2x_reload = true;
4884 }
4885 } else {
4886 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4887 bp->link_params.loopback_mode = LOOPBACK_NONE;
4888 bnx2x_reload = true;
4889 }
538dd2e3
MB
4890 }
4891 }
4892
16a5fd92 4893 /* if GRO is changed while LRO is enabled, don't force a reload */
f8dcb5e3
MS
4894 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4895 changes &= ~NETIF_F_GRO;
8802f579 4896
aebf6244 4897 /* if GRO is changed while HW TPA is off, don't force a reload */
f8dcb5e3
MS
4898 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4899 changes &= ~NETIF_F_GRO;
aebf6244 4900
8802f579 4901 if (changes)
538dd2e3 4902 bnx2x_reload = true;
8802f579 4903
538dd2e3 4904 if (bnx2x_reload) {
f8dcb5e3
MS
4905 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4906 dev->features = features;
4907 rc = bnx2x_reload_if_running(dev);
4908 return rc ? rc : 1;
4909 }
66371c44 4910 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4911 }
4912
66371c44 4913 return 0;
9f6c9258
DK
4914}
4915
4916void bnx2x_tx_timeout(struct net_device *dev)
4917{
4918 struct bnx2x *bp = netdev_priv(dev);
4919
4920#ifdef BNX2X_STOP_ON_ERROR
4921 if (!bp->panic)
4922 bnx2x_panic();
4923#endif
7be08a72 4924
9f6c9258 4925 /* This allows the netif to be shutdown gracefully before resetting */
230bb0f3 4926 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
9f6c9258
DK
4927}
4928
9f6c9258
DK
4929int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4930{
4931 struct net_device *dev = pci_get_drvdata(pdev);
4932 struct bnx2x *bp;
4933
4934 if (!dev) {
4935 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4936 return -ENODEV;
4937 }
4938 bp = netdev_priv(dev);
4939
4940 rtnl_lock();
4941
4942 pci_save_state(pdev);
4943
4944 if (!netif_running(dev)) {
4945 rtnl_unlock();
4946 return 0;
4947 }
4948
4949 netif_device_detach(dev);
4950
5d07d868 4951 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4952
4953 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4954
4955 rtnl_unlock();
4956
4957 return 0;
4958}
4959
4960int bnx2x_resume(struct pci_dev *pdev)
4961{
4962 struct net_device *dev = pci_get_drvdata(pdev);
4963 struct bnx2x *bp;
4964 int rc;
4965
4966 if (!dev) {
4967 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4968 return -ENODEV;
4969 }
4970 bp = netdev_priv(dev);
4971
4972 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4973 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4974 return -EAGAIN;
4975 }
4976
4977 rtnl_lock();
4978
4979 pci_restore_state(pdev);
4980
4981 if (!netif_running(dev)) {
4982 rtnl_unlock();
4983 return 0;
4984 }
4985
4986 bnx2x_set_power_state(bp, PCI_D0);
4987 netif_device_attach(dev);
4988
4989 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4990
4991 rtnl_unlock();
4992
4993 return rc;
4994}
619c5cb6 4995
619c5cb6
VZ
4996void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4997 u32 cid)
4998{
b9871bcf
AE
4999 if (!cxt) {
5000 BNX2X_ERR("bad context pointer %p\n", cxt);
5001 return;
5002 }
5003
619c5cb6
VZ
5004 /* ustorm cxt validation */
5005 cxt->ustorm_ag_context.cdu_usage =
5006 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5007 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5008 /* xcontext validation */
5009 cxt->xstorm_ag_context.cdu_reserved =
5010 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5011 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5012}
5013
1191cb83
ED
5014static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5015 u8 fw_sb_id, u8 sb_index,
5016 u8 ticks)
619c5cb6 5017{
619c5cb6
VZ
5018 u32 addr = BAR_CSTRORM_INTMEM +
5019 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5020 REG_WR8(bp, addr, ticks);
51c1a580
MS
5021 DP(NETIF_MSG_IFUP,
5022 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5023 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
5024}
5025
1191cb83
ED
5026static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5027 u16 fw_sb_id, u8 sb_index,
5028 u8 disable)
619c5cb6
VZ
5029{
5030 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5031 u32 addr = BAR_CSTRORM_INTMEM +
5032 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 5033 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
5034 /* clear and set */
5035 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5036 flags |= enable_flag;
0c14e5ce 5037 REG_WR8(bp, addr, flags);
51c1a580
MS
5038 DP(NETIF_MSG_IFUP,
5039 "port %x fw_sb_id %d sb_index %d disable %d\n",
5040 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
5041}
5042
5043void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5044 u8 sb_index, u8 disable, u16 usec)
5045{
5046 int port = BP_PORT(bp);
5047 u8 ticks = usec / BNX2X_BTR;
5048
5049 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5050
5051 disable = disable ? 1 : (usec ? 0 : 1);
5052 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5053}
230bb0f3
YM
5054
5055void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5056 u32 verbose)
5057{
4e857c58 5058 smp_mb__before_atomic();
230bb0f3 5059 set_bit(flag, &bp->sp_rtnl_state);
4e857c58 5060 smp_mb__after_atomic();
230bb0f3
YM
5061 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5062 flag);
5063 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5064}
5065EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);