]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
net: rename include/net/ll_poll.h to include/net/busy_poll.h
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_cmn.c
CommitLineData
9f6c9258
DK
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
247fa82b 3 * Copyright (c) 2007-2013 Broadcom Corporation
9f6c9258
DK
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
f1deab50
JP
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
9f6c9258 20#include <linux/etherdevice.h>
9bcc0893 21#include <linux/if_vlan.h>
a6b7a407 22#include <linux/interrupt.h>
9f6c9258 23#include <linux/ip.h>
9969085e 24#include <net/tcp.h>
f2e0899f 25#include <net/ipv6.h>
7f3e01fe 26#include <net/ip6_checksum.h>
076bb0c8 27#include <net/busy_poll.h>
c0cba59e 28#include <linux/prefetch.h>
9f6c9258 29#include "bnx2x_cmn.h"
523224a3 30#include "bnx2x_init.h"
042181f5 31#include "bnx2x_sp.h"
9f6c9258 32
b3b83c3f
DK
33/**
34 * bnx2x_move_fp - move content of the fastpath structure.
35 *
36 * @bp: driver handle
37 * @from: source FP index
38 * @to: destination FP index
39 *
40 * Makes sure the contents of the bp->fp[to].napi is kept
72754080
AE
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
65565884
MS
43 * source onto the target. Update txdata pointers and related
44 * content.
b3b83c3f
DK
45 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
15192a8c
BW
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
65565884
MS
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
72754080
AE
56
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
59
b3b83c3f
DK
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
62 to_fp->index = to;
65565884 63
15192a8c
BW
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69
65565884
MS
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
73 */
74
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77 (bp)->max_cos;
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81 }
82
4864a16a
YM
83 memcpy(&bp->bnx2x_txq[new_txdata_index],
84 &bp->bnx2x_txq[old_txdata_index],
65565884
MS
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
b3b83c3f
DK
87}
88
8ca5e17e
AE
89/**
90 * bnx2x_fill_fw_str - Fill buffer with FW version string.
91 *
92 * @bp: driver handle
93 * @buf: character buffer to fill with the fw name
94 * @buf_len: length of the above buffer
95 *
96 */
97void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
98{
99 if (IS_PF(bp)) {
100 u8 phy_fw_ver[PHY_FW_VER_LEN];
101
102 phy_fw_ver[0] = '\0';
103 bnx2x_get_ext_phy_fw_version(&bp->link_params,
104 phy_fw_ver, PHY_FW_VER_LEN);
105 strlcpy(buf, bp->fw_ver, buf_len);
106 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
107 "bc %d.%d.%d%s%s",
108 (bp->common.bc_ver & 0xff0000) >> 16,
109 (bp->common.bc_ver & 0xff00) >> 8,
110 (bp->common.bc_ver & 0xff),
111 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
112 } else {
6411280a 113 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
8ca5e17e
AE
114 }
115}
116
4864a16a
YM
117/**
118 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
119 *
120 * @bp: driver handle
121 * @delta: number of eth queues which were not allocated
122 */
123static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
124{
125 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
126
127 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
16a5fd92 128 * backward along the array could cause memory to be overridden
4864a16a
YM
129 */
130 for (cos = 1; cos < bp->max_cos; cos++) {
131 for (i = 0; i < old_eth_num - delta; i++) {
132 struct bnx2x_fastpath *fp = &bp->fp[i];
133 int new_idx = cos * (old_eth_num - delta) + i;
134
135 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
136 sizeof(struct bnx2x_fp_txdata));
137 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
138 }
139 }
140}
141
619c5cb6
VZ
142int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
143
9f6c9258
DK
144/* free skb in the packet ring at pos idx
145 * return idx of last bd freed
146 */
6383c0b3 147static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
148 u16 idx, unsigned int *pkts_compl,
149 unsigned int *bytes_compl)
9f6c9258 150{
6383c0b3 151 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
152 struct eth_tx_start_bd *tx_start_bd;
153 struct eth_tx_bd *tx_data_bd;
154 struct sk_buff *skb = tx_buf->skb;
155 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
156 int nbd;
157
158 /* prefetch skb end pointer to speedup dev_kfree_skb() */
159 prefetch(&skb->end);
160
51c1a580 161 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 162 txdata->txq_index, idx, tx_buf, skb);
9f6c9258
DK
163
164 /* unmap first bd */
6383c0b3 165 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 166 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 167 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258
DK
168
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170#ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
173 bnx2x_panic();
174 }
175#endif
176 new_cons = nbd + tx_buf->first_bd;
177
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
180
181 /* Skip a parse bd... */
182 --nbd;
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
184
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
187 --nbd;
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
189 }
190
191 /* now free frags */
192 while (nbd > 0) {
193
6383c0b3 194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
197 if (--nbd)
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
199 }
200
201 /* release skb */
202 WARN_ON(!skb);
d8290ae5 203 if (likely(skb)) {
2df1a70a
TH
204 (*pkts_compl)++;
205 (*bytes_compl) += skb->len;
206 }
d8290ae5 207
40955532 208 dev_kfree_skb_any(skb);
9f6c9258
DK
209 tx_buf->first_bd = 0;
210 tx_buf->skb = NULL;
211
212 return new_cons;
213}
214
6383c0b3 215int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 216{
9f6c9258 217 struct netdev_queue *txq;
6383c0b3 218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 219 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
220
221#ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
223 return -1;
224#endif
225
6383c0b3
AE
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
229
230 while (sw_cons != hw_cons) {
231 u16 pkt_cons;
232
233 pkt_cons = TX_BD(sw_cons);
234
51c1a580
MS
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 238
2df1a70a 239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 240 &pkts_compl, &bytes_compl);
2df1a70a 241
9f6c9258
DK
242 sw_cons++;
243 }
244
2df1a70a
TH
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
246
6383c0b3
AE
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
249
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
254 * forever.
619c5cb6
VZ
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
258 */
259 smp_mb();
260
9f6c9258 261 if (unlikely(netif_tx_queue_stopped(txq))) {
16a5fd92 262 /* Taking tx_lock() is needed to prevent re-enabling the queue
9f6c9258
DK
263 * while it's empty. This could have happen if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
266 *
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
269 * stops the queue
270 */
271
272 __netif_tx_lock(txq, smp_processor_id());
273
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
9f6c9258
DK
277 netif_tx_wake_queue(txq);
278
279 __netif_tx_unlock(txq);
280 }
281 return 0;
282}
283
284static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
285 u16 idx)
286{
287 u16 last_max = fp->last_max_sge;
288
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
291}
292
621b4d66
DK
293static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
294 u16 sge_len,
295 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
296{
297 struct bnx2x *bp = fp->bp;
9f6c9258
DK
298 u16 last_max, last_elem, first_elem;
299 u16 delta = 0;
300 u16 i;
301
302 if (!sge_len)
303 return;
304
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
619c5cb6 307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
309
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
312
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
523224a3 315 bnx2x_update_last_max_sge(fp,
621b4d66 316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
317
318 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
321
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
324 last_elem++;
325
326 /* Now update the prod */
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
329 break;
330
619c5cb6
VZ
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
333 }
334
335 if (delta > 0) {
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
339 }
340
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
344}
345
2de67439 346/* Get Toeplitz hash value in the skb using the value from the
e52fcb24
ED
347 * CQE (calculated by HW).
348 */
349static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb
ED
350 const struct eth_fast_path_rx_cqe *cqe,
351 bool *l4_rxhash)
e52fcb24 352{
2de67439 353 /* Get Toeplitz hash from CQE */
e52fcb24 354 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
357
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
e52fcb24 361 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb
ED
362 }
363 *l4_rxhash = false;
e52fcb24
ED
364 return 0;
365}
366
9f6c9258 367static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 368 u16 cons, u16 prod,
619c5cb6 369 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
370{
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
375 dma_addr_t mapping;
619c5cb6
VZ
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 378
619c5cb6
VZ
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
382
e52fcb24 383 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 384 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 385 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
386 fp->rx_buf_size, DMA_FROM_DEVICE);
387 /*
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
391 */
392
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
e52fcb24 395 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
397 return;
398 }
9f6c9258 399
e52fcb24
ED
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
619c5cb6 402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 403 /* point prod_bd to new data */
9f6c9258
DK
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
406
619c5cb6
VZ
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
409
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
a334b5fb 417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
621b4d66
DK
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 420 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
621b4d66
DK
421 tpa_info->gro_size = gro_size;
422 }
619c5cb6 423
9f6c9258
DK
424#ifdef BNX2X_STOP_ON_ERROR
425 fp->tpa_queue_used |= (1 << queue);
426#ifdef _ASM_GENERIC_INT_L64_H
427 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
428#else
429 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
430#endif
431 fp->tpa_queue_used);
432#endif
433}
434
e4e3c02a
VZ
435/* Timestamp option length allowed for TPA aggregation:
436 *
437 * nop nop kind length echo val
438 */
439#define TPA_TSTAMP_OPT_LEN 12
440/**
cbf1de72 441 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 442 *
cbf1de72 443 * @skb: packet skb
e8920674
DK
444 * @parsing_flags: parsing flags from the START CQE
445 * @len_on_bd: total length of the first packet for the
446 * aggregation.
cbf1de72 447 * @pkt_len: length of all segments
e8920674
DK
448 *
449 * Approximate value of the MSS for this aggregation calculated using
450 * the first packet of it.
2de67439 451 * Compute number of aggregated segments, and gso_type.
e4e3c02a 452 */
cbf1de72 453static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
ab5777d7
YM
454 u16 len_on_bd, unsigned int pkt_len,
455 u16 num_of_coalesced_segs)
e4e3c02a 456{
cbf1de72 457 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 458 * other than timestamp or IPv6 extension headers.
e4e3c02a 459 */
619c5cb6
VZ
460 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461
462 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 463 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 464 hdrs_len += sizeof(struct ipv6hdr);
cbf1de72
YM
465 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
466 } else {
619c5cb6 467 hdrs_len += sizeof(struct iphdr);
cbf1de72
YM
468 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
469 }
e4e3c02a
VZ
470
471 /* Check if there was a TCP timestamp, if there is it's will
472 * always be 12 bytes length: nop nop kind length echo val.
473 *
474 * Otherwise FW would close the aggregation.
475 */
476 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477 hdrs_len += TPA_TSTAMP_OPT_LEN;
478
cbf1de72
YM
479 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
480
481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482 * to skb_shinfo(skb)->gso_segs
483 */
ab5777d7 484 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
e4e3c02a
VZ
485}
486
1191cb83
ED
487static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488 struct bnx2x_fastpath *fp, u16 index)
489{
490 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
493 dma_addr_t mapping;
494
495 if (unlikely(page == NULL)) {
496 BNX2X_ERR("Can't alloc sge\n");
497 return -ENOMEM;
498 }
499
500 mapping = dma_map_page(&bp->pdev->dev, page, 0,
924d75ab 501 SGE_PAGES, DMA_FROM_DEVICE);
1191cb83
ED
502 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503 __free_pages(page, PAGES_PER_SGE_SHIFT);
504 BNX2X_ERR("Can't map sge\n");
505 return -ENOMEM;
506 }
507
508 sw_buf->page = page;
509 dma_unmap_addr_set(sw_buf, mapping, mapping);
510
511 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
513
514 return 0;
515}
516
9f6c9258 517static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
518 struct bnx2x_agg_info *tpa_info,
519 u16 pages,
520 struct sk_buff *skb,
619c5cb6
VZ
521 struct eth_end_agg_rx_cqe *cqe,
522 u16 cqe_idx)
9f6c9258
DK
523{
524 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
525 u32 i, frag_len, frag_size;
526 int err, j, frag_id = 0;
619c5cb6 527 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 528 u16 full_page = 0, gro_size = 0;
9f6c9258 529
619c5cb6 530 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
531
532 if (fp->mode == TPA_MODE_GRO) {
533 gro_size = tpa_info->gro_size;
534 full_page = tpa_info->full_page;
535 }
9f6c9258
DK
536
537 /* This is needed in order to enable forwarding support */
cbf1de72
YM
538 if (frag_size)
539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
540 le16_to_cpu(cqe->pkt_len),
541 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 542
9f6c9258 543#ifdef BNX2X_STOP_ON_ERROR
924d75ab 544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
545 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
546 pages, cqe_idx);
619c5cb6 547 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
548 bnx2x_panic();
549 return -EINVAL;
550 }
551#endif
552
553 /* Run through the SGL and compose the fragmented skb */
554 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 555 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
556
557 /* FW gives the indices of the SGE as if the ring is an array
558 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
559 if (fp->mode == TPA_MODE_GRO)
560 frag_len = min_t(u32, frag_size, (u32)full_page);
561 else /* LRO */
924d75ab 562 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 563
9f6c9258
DK
564 rx_pg = &fp->rx_page_ring[sge_idx];
565 old_rx_pg = *rx_pg;
566
567 /* If we fail to allocate a substitute page, we simply stop
568 where we are and drop the whole packet */
569 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
570 if (unlikely(err)) {
15192a8c 571 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
572 return err;
573 }
574
16a5fd92 575 /* Unmap the page as we're going to pass it to the stack */
9f6c9258
DK
576 dma_unmap_page(&bp->pdev->dev,
577 dma_unmap_addr(&old_rx_pg, mapping),
924d75ab 578 SGE_PAGES, DMA_FROM_DEVICE);
9f6c9258 579 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
580 if (fp->mode == TPA_MODE_LRO)
581 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
582 else { /* GRO */
583 int rem;
584 int offset = 0;
585 for (rem = frag_len; rem > 0; rem -= gro_size) {
586 int len = rem > gro_size ? gro_size : rem;
587 skb_fill_page_desc(skb, frag_id++,
588 old_rx_pg.page, offset, len);
589 if (offset)
590 get_page(old_rx_pg.page);
591 offset += len;
592 }
593 }
9f6c9258
DK
594
595 skb->data_len += frag_len;
924d75ab 596 skb->truesize += SGE_PAGES;
9f6c9258
DK
597 skb->len += frag_len;
598
599 frag_size -= frag_len;
600 }
601
602 return 0;
603}
604
d46d132c
ED
605static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
606{
607 if (fp->rx_frag_size)
608 put_page(virt_to_head_page(data));
609 else
610 kfree(data);
611}
612
613static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
614{
615 if (fp->rx_frag_size)
616 return netdev_alloc_frag(fp->rx_frag_size);
617
618 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
619}
620
9969085e
YM
621#ifdef CONFIG_INET
622static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
623{
624 const struct iphdr *iph = ip_hdr(skb);
625 struct tcphdr *th;
626
627 skb_set_transport_header(skb, sizeof(struct iphdr));
628 th = tcp_hdr(skb);
629
630 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631 iph->saddr, iph->daddr, 0);
632}
633
634static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
635{
636 struct ipv6hdr *iph = ipv6_hdr(skb);
637 struct tcphdr *th;
638
639 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
640 th = tcp_hdr(skb);
641
642 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643 &iph->saddr, &iph->daddr, 0);
644}
2c2d06d5
YM
645
646static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
647 void (*gro_func)(struct bnx2x*, struct sk_buff*))
648{
649 skb_set_network_header(skb, 0);
650 gro_func(bp, skb);
651 tcp_gro_complete(skb);
652}
9969085e
YM
653#endif
654
655static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
656 struct sk_buff *skb)
657{
658#ifdef CONFIG_INET
cbf1de72 659 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
660 switch (be16_to_cpu(skb->protocol)) {
661 case ETH_P_IP:
2c2d06d5 662 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
9969085e
YM
663 break;
664 case ETH_P_IPV6:
2c2d06d5 665 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
9969085e
YM
666 break;
667 default:
2c2d06d5 668 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
9969085e
YM
669 be16_to_cpu(skb->protocol));
670 }
9969085e
YM
671 }
672#endif
673 napi_gro_receive(&fp->napi, skb);
674}
675
1191cb83
ED
676static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
677 struct bnx2x_agg_info *tpa_info,
678 u16 pages,
679 struct eth_end_agg_rx_cqe *cqe,
680 u16 cqe_idx)
9f6c9258 681{
619c5cb6 682 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 683 u8 pad = tpa_info->placement_offset;
619c5cb6 684 u16 len = tpa_info->len_on_bd;
e52fcb24 685 struct sk_buff *skb = NULL;
621b4d66 686 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
687 u8 old_tpa_state = tpa_info->tpa_state;
688
689 tpa_info->tpa_state = BNX2X_TPA_STOP;
690
691 /* If we there was an error during the handling of the TPA_START -
692 * drop this aggregation.
693 */
694 if (old_tpa_state == BNX2X_TPA_ERROR)
695 goto drop;
696
e52fcb24 697 /* Try to allocate the new data */
d46d132c 698 new_data = bnx2x_frag_alloc(fp);
9f6c9258
DK
699 /* Unmap skb in the pool anyway, as we are going to change
700 pool entry status to BNX2X_TPA_STOP even if new skb allocation
701 fails. */
702 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 703 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 704 if (likely(new_data))
d46d132c 705 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 706
e52fcb24 707 if (likely(skb)) {
9f6c9258 708#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 709 if (pad + len > fp->rx_buf_size) {
51c1a580 710 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 711 pad, len, fp->rx_buf_size);
9f6c9258
DK
712 bnx2x_panic();
713 return;
714 }
715#endif
716
e52fcb24 717 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 718 skb_put(skb, len);
e52fcb24 719 skb->rxhash = tpa_info->rxhash;
a334b5fb 720 skb->l4_rxhash = tpa_info->l4_rxhash;
9f6c9258
DK
721
722 skb->protocol = eth_type_trans(skb, bp->dev);
723 skb->ip_summed = CHECKSUM_UNNECESSARY;
724
621b4d66
DK
725 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
726 skb, cqe, cqe_idx)) {
619c5cb6 727 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 728 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 729 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 730 } else {
51c1a580
MS
731 DP(NETIF_MSG_RX_STATUS,
732 "Failed to allocate new pages - dropping packet!\n");
40955532 733 dev_kfree_skb_any(skb);
9f6c9258
DK
734 }
735
e52fcb24
ED
736 /* put new data in bin */
737 rx_buf->data = new_data;
9f6c9258 738
619c5cb6 739 return;
9f6c9258 740 }
d46d132c 741 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
742drop:
743 /* drop the packet and keep the buffer in the bin */
744 DP(NETIF_MSG_RX_STATUS,
745 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 746 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
747}
748
1191cb83
ED
749static int bnx2x_alloc_rx_data(struct bnx2x *bp,
750 struct bnx2x_fastpath *fp, u16 index)
751{
752 u8 *data;
753 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
754 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
755 dma_addr_t mapping;
756
d46d132c 757 data = bnx2x_frag_alloc(fp);
1191cb83
ED
758 if (unlikely(data == NULL))
759 return -ENOMEM;
760
761 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
762 fp->rx_buf_size,
763 DMA_FROM_DEVICE);
764 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 765 bnx2x_frag_free(fp, data);
1191cb83
ED
766 BNX2X_ERR("Can't map rx data\n");
767 return -ENOMEM;
768 }
769
770 rx_buf->data = data;
771 dma_unmap_addr_set(rx_buf, mapping, mapping);
772
773 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
774 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
775
776 return 0;
777}
778
15192a8c
BW
779static
780void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
781 struct bnx2x_fastpath *fp,
782 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 783{
e488921f
MS
784 /* Do nothing if no L4 csum validation was done.
785 * We do not check whether IP csum was validated. For IPv4 we assume
786 * that if the card got as far as validating the L4 csum, it also
787 * validated the IP csum. IPv6 has no IP csum.
788 */
d6cb3e41 789 if (cqe->fast_path_cqe.status_flags &
e488921f 790 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
791 return;
792
e488921f 793 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
794
795 if (cqe->fast_path_cqe.type_error_flags &
796 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
797 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 798 qstats->hw_csum_err++;
d6cb3e41
ED
799 else
800 skb->ip_summed = CHECKSUM_UNNECESSARY;
801}
9f6c9258
DK
802
803int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
804{
805 struct bnx2x *bp = fp->bp;
806 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
75b29459 807 u16 sw_comp_cons, sw_comp_prod;
9f6c9258 808 int rx_pkt = 0;
75b29459
DK
809 union eth_rx_cqe *cqe;
810 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258
DK
811
812#ifdef BNX2X_STOP_ON_ERROR
813 if (unlikely(bp->panic))
814 return 0;
815#endif
816
9f6c9258
DK
817 bd_cons = fp->rx_bd_cons;
818 bd_prod = fp->rx_bd_prod;
819 bd_prod_fw = bd_prod;
820 sw_comp_cons = fp->rx_comp_cons;
821 sw_comp_prod = fp->rx_comp_prod;
822
75b29459
DK
823 comp_ring_cons = RCQ_BD(sw_comp_cons);
824 cqe = &fp->rx_comp_ring[comp_ring_cons];
825 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
826
827 DP(NETIF_MSG_RX_STATUS,
75b29459 828 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
9f6c9258 829
75b29459 830 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
9f6c9258
DK
831 struct sw_rx_bd *rx_buf = NULL;
832 struct sk_buff *skb;
9f6c9258 833 u8 cqe_fp_flags;
619c5cb6 834 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 835 u16 len, pad, queue;
e52fcb24 836 u8 *data;
a334b5fb 837 bool l4_rxhash;
9f6c9258 838
619c5cb6
VZ
839#ifdef BNX2X_STOP_ON_ERROR
840 if (unlikely(bp->panic))
841 return 0;
842#endif
843
9f6c9258
DK
844 bd_prod = RX_BD(bd_prod);
845 bd_cons = RX_BD(bd_cons);
846
619c5cb6
VZ
847 cqe_fp_flags = cqe_fp->type_error_flags;
848 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 849
51c1a580
MS
850 DP(NETIF_MSG_RX_STATUS,
851 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
852 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
853 cqe_fp_flags, cqe_fp->status_flags,
854 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
855 le16_to_cpu(cqe_fp->vlan_tag),
856 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
857
858 /* is this a slowpath msg? */
619c5cb6 859 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
860 bnx2x_sp_event(fp, cqe);
861 goto next_cqe;
e52fcb24 862 }
621b4d66 863
e52fcb24
ED
864 rx_buf = &fp->rx_buf_ring[bd_cons];
865 data = rx_buf->data;
9f6c9258 866
e52fcb24 867 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
868 struct bnx2x_agg_info *tpa_info;
869 u16 frag_size, pages;
619c5cb6 870#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
871 /* sanity check */
872 if (fp->disable_tpa &&
873 (CQE_TYPE_START(cqe_fp_type) ||
874 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 875 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 876 CQE_TYPE(cqe_fp_type));
619c5cb6 877#endif
9f6c9258 878
e52fcb24
ED
879 if (CQE_TYPE_START(cqe_fp_type)) {
880 u16 queue = cqe_fp->queue_index;
881 DP(NETIF_MSG_RX_STATUS,
882 "calling tpa_start on queue %d\n",
883 queue);
9f6c9258 884
e52fcb24
ED
885 bnx2x_tpa_start(fp, queue,
886 bd_cons, bd_prod,
887 cqe_fp);
621b4d66 888
e52fcb24 889 goto next_rx;
621b4d66
DK
890 }
891 queue = cqe->end_agg_cqe.queue_index;
892 tpa_info = &fp->tpa_info[queue];
893 DP(NETIF_MSG_RX_STATUS,
894 "calling tpa_stop on queue %d\n",
895 queue);
896
897 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
898 tpa_info->len_on_bd;
899
900 if (fp->mode == TPA_MODE_GRO)
901 pages = (frag_size + tpa_info->full_page - 1) /
902 tpa_info->full_page;
903 else
904 pages = SGE_PAGE_ALIGN(frag_size) >>
905 SGE_PAGE_SHIFT;
906
907 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
908 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 909#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
910 if (bp->panic)
911 return 0;
9f6c9258
DK
912#endif
913
621b4d66
DK
914 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
915 goto next_cqe;
e52fcb24
ED
916 }
917 /* non TPA */
621b4d66 918 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
919 pad = cqe_fp->placement_offset;
920 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 921 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
922 pad + RX_COPY_THRESH,
923 DMA_FROM_DEVICE);
924 pad += NET_SKB_PAD;
925 prefetch(data + pad); /* speedup eth_type_trans() */
926 /* is this an error packet? */
927 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 928 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
929 "ERROR flags %x rx packet %u\n",
930 cqe_fp_flags, sw_comp_cons);
15192a8c 931 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
932 goto reuse_rx;
933 }
9f6c9258 934
e52fcb24
ED
935 /* Since we don't have a jumbo ring
936 * copy small packets if mtu > 1500
937 */
938 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
939 (len <= RX_COPY_THRESH)) {
940 skb = netdev_alloc_skb_ip_align(bp->dev, len);
941 if (skb == NULL) {
51c1a580 942 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 943 "ERROR packet dropped because of alloc failure\n");
15192a8c 944 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
945 goto reuse_rx;
946 }
e52fcb24
ED
947 memcpy(skb->data, data + pad, len);
948 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
949 } else {
950 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 951 dma_unmap_single(&bp->pdev->dev,
e52fcb24 952 dma_unmap_addr(rx_buf, mapping),
a8c94b91 953 fp->rx_buf_size,
9f6c9258 954 DMA_FROM_DEVICE);
d46d132c 955 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 956 if (unlikely(!skb)) {
d46d132c 957 bnx2x_frag_free(fp, data);
15192a8c
BW
958 bnx2x_fp_qstats(bp, fp)->
959 rx_skb_alloc_failed++;
e52fcb24
ED
960 goto next_rx;
961 }
9f6c9258 962 skb_reserve(skb, pad);
9f6c9258 963 } else {
51c1a580
MS
964 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
965 "ERROR packet dropped because of alloc failure\n");
15192a8c 966 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 967reuse_rx:
e52fcb24 968 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
969 goto next_rx;
970 }
036d2df9 971 }
9f6c9258 972
036d2df9
DK
973 skb_put(skb, len);
974 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 975
036d2df9 976 /* Set Toeplitz hash for a none-LRO skb */
a334b5fb
ED
977 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
978 skb->l4_rxhash = l4_rxhash;
9f6c9258 979
036d2df9 980 skb_checksum_none_assert(skb);
f85582f8 981
d6cb3e41 982 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
983 bnx2x_csum_validate(skb, cqe, fp,
984 bnx2x_fp_qstats(bp, fp));
9f6c9258 985
f233cafe 986 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 987
619c5cb6
VZ
988 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
989 PARSING_FLAGS_VLAN)
86a9bad3 990 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 991 le16_to_cpu(cqe_fp->vlan_tag));
9f6c9258 992
8f20aa57
DK
993 skb_mark_ll(skb, &fp->napi);
994
995 if (bnx2x_fp_ll_polling(fp))
996 netif_receive_skb(skb);
997 else
998 napi_gro_receive(&fp->napi, skb);
9f6c9258 999next_rx:
e52fcb24 1000 rx_buf->data = NULL;
9f6c9258
DK
1001
1002 bd_cons = NEXT_RX_IDX(bd_cons);
1003 bd_prod = NEXT_RX_IDX(bd_prod);
1004 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1005 rx_pkt++;
1006next_cqe:
1007 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1008 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1009
75b29459
DK
1010 /* mark CQE as free */
1011 BNX2X_SEED_CQE(cqe_fp);
1012
9f6c9258
DK
1013 if (rx_pkt == budget)
1014 break;
75b29459
DK
1015
1016 comp_ring_cons = RCQ_BD(sw_comp_cons);
1017 cqe = &fp->rx_comp_ring[comp_ring_cons];
1018 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
1019 } /* while */
1020
1021 fp->rx_bd_cons = bd_cons;
1022 fp->rx_bd_prod = bd_prod_fw;
1023 fp->rx_comp_cons = sw_comp_cons;
1024 fp->rx_comp_prod = sw_comp_prod;
1025
1026 /* Update producers */
1027 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1028 fp->rx_sge_prod);
1029
1030 fp->rx_pkt += rx_pkt;
1031 fp->rx_calls++;
1032
1033 return rx_pkt;
1034}
1035
1036static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1037{
1038 struct bnx2x_fastpath *fp = fp_cookie;
1039 struct bnx2x *bp = fp->bp;
6383c0b3 1040 u8 cos;
9f6c9258 1041
51c1a580
MS
1042 DP(NETIF_MSG_INTR,
1043 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1044 fp->index, fp->fw_sb_id, fp->igu_sb_id);
ecf01c22 1045
523224a3 1046 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1047
1048#ifdef BNX2X_STOP_ON_ERROR
1049 if (unlikely(bp->panic))
1050 return IRQ_HANDLED;
1051#endif
1052
1053 /* Handle Rx and Tx according to MSI-X vector */
6383c0b3 1054 for_each_cos_in_tx_queue(fp, cos)
65565884 1055 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1056
523224a3 1057 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
1058 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1059
1060 return IRQ_HANDLED;
1061}
1062
9f6c9258
DK
1063/* HW Lock for shared dual port PHYs */
1064void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1065{
1066 mutex_lock(&bp->port.phy_mutex);
1067
8203c4b6 1068 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1069}
1070
1071void bnx2x_release_phy_lock(struct bnx2x *bp)
1072{
8203c4b6 1073 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1074
1075 mutex_unlock(&bp->port.phy_mutex);
1076}
1077
0793f83f
DK
1078/* calculates MF speed according to current linespeed and MF configuration */
1079u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1080{
1081 u16 line_speed = bp->link_vars.line_speed;
1082 if (IS_MF(bp)) {
faa6fcbb
DK
1083 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1084 bp->mf_config[BP_VN(bp)]);
1085
1086 /* Calculate the current MAX line speed limit for the MF
1087 * devices
0793f83f 1088 */
faa6fcbb
DK
1089 if (IS_MF_SI(bp))
1090 line_speed = (line_speed * maxCfg) / 100;
1091 else { /* SD mode */
0793f83f
DK
1092 u16 vn_max_rate = maxCfg * 100;
1093
1094 if (vn_max_rate < line_speed)
1095 line_speed = vn_max_rate;
faa6fcbb 1096 }
0793f83f
DK
1097 }
1098
1099 return line_speed;
1100}
1101
2ae17f66
VZ
1102/**
1103 * bnx2x_fill_report_data - fill link report data to report
1104 *
1105 * @bp: driver handle
1106 * @data: link state to update
1107 *
1108 * It uses a none-atomic bit operations because is called under the mutex.
1109 */
1191cb83
ED
1110static void bnx2x_fill_report_data(struct bnx2x *bp,
1111 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1112{
1113 u16 line_speed = bnx2x_get_mf_speed(bp);
1114
1115 memset(data, 0, sizeof(*data));
1116
16a5fd92 1117 /* Fill the report data: effective line speed */
2ae17f66
VZ
1118 data->line_speed = line_speed;
1119
1120 /* Link is down */
1121 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1122 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1123 &data->link_report_flags);
1124
1125 /* Full DUPLEX */
1126 if (bp->link_vars.duplex == DUPLEX_FULL)
1127 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1128
1129 /* Rx Flow Control is ON */
1130 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1131 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1132
1133 /* Tx Flow Control is ON */
1134 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1135 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1136}
1137
1138/**
1139 * bnx2x_link_report - report link status to OS.
1140 *
1141 * @bp: driver handle
1142 *
1143 * Calls the __bnx2x_link_report() under the same locking scheme
1144 * as a link/PHY state managing code to ensure a consistent link
1145 * reporting.
1146 */
1147
9f6c9258
DK
1148void bnx2x_link_report(struct bnx2x *bp)
1149{
2ae17f66
VZ
1150 bnx2x_acquire_phy_lock(bp);
1151 __bnx2x_link_report(bp);
1152 bnx2x_release_phy_lock(bp);
1153}
9f6c9258 1154
2ae17f66
VZ
1155/**
1156 * __bnx2x_link_report - report link status to OS.
1157 *
1158 * @bp: driver handle
1159 *
16a5fd92 1160 * None atomic implementation.
2ae17f66
VZ
1161 * Should be called under the phy_lock.
1162 */
1163void __bnx2x_link_report(struct bnx2x *bp)
1164{
1165 struct bnx2x_link_report_data cur_data;
9f6c9258 1166
2ae17f66 1167 /* reread mf_cfg */
ad5afc89 1168 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1169 bnx2x_read_mf_cfg(bp);
1170
1171 /* Read the current link report info */
1172 bnx2x_fill_report_data(bp, &cur_data);
1173
1174 /* Don't report link down or exactly the same link status twice */
1175 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1176 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1177 &bp->last_reported_link.link_report_flags) &&
1178 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1179 &cur_data.link_report_flags)))
1180 return;
1181
1182 bp->link_cnt++;
9f6c9258 1183
2ae17f66
VZ
1184 /* We are going to report a new link parameters now -
1185 * remember the current data for the next time.
1186 */
1187 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1188
2ae17f66
VZ
1189 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1190 &cur_data.link_report_flags)) {
1191 netif_carrier_off(bp->dev);
1192 netdev_err(bp->dev, "NIC Link is Down\n");
1193 return;
1194 } else {
94f05b0f
JP
1195 const char *duplex;
1196 const char *flow;
1197
2ae17f66 1198 netif_carrier_on(bp->dev);
9f6c9258 1199
2ae17f66
VZ
1200 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1201 &cur_data.link_report_flags))
94f05b0f 1202 duplex = "full";
9f6c9258 1203 else
94f05b0f 1204 duplex = "half";
9f6c9258 1205
2ae17f66
VZ
1206 /* Handle the FC at the end so that only these flags would be
1207 * possibly set. This way we may easily check if there is no FC
1208 * enabled.
1209 */
1210 if (cur_data.link_report_flags) {
1211 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1212 &cur_data.link_report_flags)) {
2ae17f66
VZ
1213 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1214 &cur_data.link_report_flags))
94f05b0f
JP
1215 flow = "ON - receive & transmit";
1216 else
1217 flow = "ON - receive";
9f6c9258 1218 } else {
94f05b0f 1219 flow = "ON - transmit";
9f6c9258 1220 }
94f05b0f
JP
1221 } else {
1222 flow = "none";
9f6c9258 1223 }
94f05b0f
JP
1224 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1225 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1226 }
1227}
1228
1191cb83
ED
1229static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1230{
1231 int i;
1232
1233 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1234 struct eth_rx_sge *sge;
1235
1236 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1237 sge->addr_hi =
1238 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1239 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1240
1241 sge->addr_lo =
1242 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1243 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1244 }
1245}
1246
1247static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1248 struct bnx2x_fastpath *fp, int last)
1249{
1250 int i;
1251
1252 for (i = 0; i < last; i++) {
1253 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1254 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1255 u8 *data = first_buf->data;
1256
1257 if (data == NULL) {
1258 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1259 continue;
1260 }
1261 if (tpa_info->tpa_state == BNX2X_TPA_START)
1262 dma_unmap_single(&bp->pdev->dev,
1263 dma_unmap_addr(first_buf, mapping),
1264 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1265 bnx2x_frag_free(fp, data);
1191cb83
ED
1266 first_buf->data = NULL;
1267 }
1268}
1269
55c11941
MS
1270void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1271{
1272 int j;
1273
1274 for_each_rx_queue_cnic(bp, j) {
1275 struct bnx2x_fastpath *fp = &bp->fp[j];
1276
1277 fp->rx_bd_cons = 0;
1278
1279 /* Activate BD ring */
1280 /* Warning!
1281 * this will generate an interrupt (to the TSTORM)
1282 * must only be done after chip is initialized
1283 */
1284 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1285 fp->rx_sge_prod);
1286 }
1287}
1288
9f6c9258
DK
1289void bnx2x_init_rx_rings(struct bnx2x *bp)
1290{
1291 int func = BP_FUNC(bp);
523224a3 1292 u16 ring_prod;
9f6c9258 1293 int i, j;
25141580 1294
b3b83c3f 1295 /* Allocate TPA resources */
55c11941 1296 for_each_eth_queue(bp, j) {
523224a3 1297 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1298
a8c94b91
VZ
1299 DP(NETIF_MSG_IFUP,
1300 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1301
523224a3 1302 if (!fp->disable_tpa) {
16a5fd92 1303 /* Fill the per-aggregation pool */
dfacf138 1304 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1305 struct bnx2x_agg_info *tpa_info =
1306 &fp->tpa_info[i];
1307 struct sw_rx_bd *first_buf =
1308 &tpa_info->first_buf;
1309
d46d132c 1310 first_buf->data = bnx2x_frag_alloc(fp);
e52fcb24 1311 if (!first_buf->data) {
51c1a580
MS
1312 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1313 j);
9f6c9258
DK
1314 bnx2x_free_tpa_pool(bp, fp, i);
1315 fp->disable_tpa = 1;
1316 break;
1317 }
619c5cb6
VZ
1318 dma_unmap_addr_set(first_buf, mapping, 0);
1319 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1320 }
523224a3
DK
1321
1322 /* "next page" elements initialization */
1323 bnx2x_set_next_page_sgl(fp);
1324
1325 /* set SGEs bit mask */
1326 bnx2x_init_sge_ring_bit_mask(fp);
1327
1328 /* Allocate SGEs and initialize the ring elements */
1329 for (i = 0, ring_prod = 0;
1330 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1331
1332 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
51c1a580
MS
1333 BNX2X_ERR("was only able to allocate %d rx sges\n",
1334 i);
1335 BNX2X_ERR("disabling TPA for queue[%d]\n",
1336 j);
523224a3 1337 /* Cleanup already allocated elements */
619c5cb6
VZ
1338 bnx2x_free_rx_sge_range(bp, fp,
1339 ring_prod);
1340 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1341 MAX_AGG_QS(bp));
523224a3
DK
1342 fp->disable_tpa = 1;
1343 ring_prod = 0;
1344 break;
1345 }
1346 ring_prod = NEXT_SGE_IDX(ring_prod);
1347 }
1348
1349 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1350 }
1351 }
1352
55c11941 1353 for_each_eth_queue(bp, j) {
9f6c9258
DK
1354 struct bnx2x_fastpath *fp = &bp->fp[j];
1355
1356 fp->rx_bd_cons = 0;
9f6c9258 1357
b3b83c3f
DK
1358 /* Activate BD ring */
1359 /* Warning!
1360 * this will generate an interrupt (to the TSTORM)
1361 * must only be done after chip is initialized
1362 */
1363 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1364 fp->rx_sge_prod);
9f6c9258 1365
9f6c9258
DK
1366 if (j != 0)
1367 continue;
1368
619c5cb6 1369 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1372 U64_LO(fp->rx_comp_mapping));
1373 REG_WR(bp, BAR_USTRORM_INTMEM +
1374 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1375 U64_HI(fp->rx_comp_mapping));
1376 }
9f6c9258
DK
1377 }
1378}
f85582f8 1379
55c11941 1380static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1381{
6383c0b3 1382 u8 cos;
55c11941 1383 struct bnx2x *bp = fp->bp;
9f6c9258 1384
55c11941
MS
1385 for_each_cos_in_tx_queue(fp, cos) {
1386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1387 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1388
55c11941
MS
1389 u16 sw_prod = txdata->tx_pkt_prod;
1390 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1391
55c11941
MS
1392 while (sw_cons != sw_prod) {
1393 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1394 &pkts_compl, &bytes_compl);
1395 sw_cons++;
9f6c9258 1396 }
55c11941
MS
1397
1398 netdev_tx_reset_queue(
1399 netdev_get_tx_queue(bp->dev,
1400 txdata->txq_index));
1401 }
1402}
1403
1404static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1405{
1406 int i;
1407
1408 for_each_tx_queue_cnic(bp, i) {
1409 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1410 }
1411}
1412
1413static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1414{
1415 int i;
1416
1417 for_each_eth_queue(bp, i) {
1418 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1419 }
1420}
1421
b3b83c3f
DK
1422static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1423{
1424 struct bnx2x *bp = fp->bp;
1425 int i;
1426
1427 /* ring wasn't allocated */
1428 if (fp->rx_buf_ring == NULL)
1429 return;
1430
1431 for (i = 0; i < NUM_RX_BD; i++) {
1432 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1433 u8 *data = rx_buf->data;
b3b83c3f 1434
e52fcb24 1435 if (data == NULL)
b3b83c3f 1436 continue;
b3b83c3f
DK
1437 dma_unmap_single(&bp->pdev->dev,
1438 dma_unmap_addr(rx_buf, mapping),
1439 fp->rx_buf_size, DMA_FROM_DEVICE);
1440
e52fcb24 1441 rx_buf->data = NULL;
d46d132c 1442 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1443 }
1444}
1445
55c11941
MS
1446static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1447{
1448 int j;
1449
1450 for_each_rx_queue_cnic(bp, j) {
1451 bnx2x_free_rx_bds(&bp->fp[j]);
1452 }
1453}
1454
9f6c9258
DK
1455static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1456{
b3b83c3f 1457 int j;
9f6c9258 1458
55c11941 1459 for_each_eth_queue(bp, j) {
9f6c9258
DK
1460 struct bnx2x_fastpath *fp = &bp->fp[j];
1461
b3b83c3f 1462 bnx2x_free_rx_bds(fp);
9f6c9258 1463
9f6c9258 1464 if (!fp->disable_tpa)
dfacf138 1465 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1466 }
1467}
1468
55c11941
MS
1469void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1470{
1471 bnx2x_free_tx_skbs_cnic(bp);
1472 bnx2x_free_rx_skbs_cnic(bp);
1473}
1474
9f6c9258
DK
1475void bnx2x_free_skbs(struct bnx2x *bp)
1476{
1477 bnx2x_free_tx_skbs(bp);
1478 bnx2x_free_rx_skbs(bp);
1479}
1480
e3835b99
DK
1481void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1482{
1483 /* load old values */
1484 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1485
1486 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1487 /* leave all but MAX value */
1488 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1489
1490 /* set new MAX value */
1491 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1492 & FUNC_MF_CFG_MAX_BW_MASK;
1493
1494 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1495 }
1496}
1497
ca92429f
DK
1498/**
1499 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1500 *
1501 * @bp: driver handle
1502 * @nvecs: number of vectors to be released
1503 */
1504static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1505{
ca92429f 1506 int i, offset = 0;
9f6c9258 1507
ca92429f
DK
1508 if (nvecs == offset)
1509 return;
ad5afc89
AE
1510
1511 /* VFs don't have a default SB */
1512 if (IS_PF(bp)) {
1513 free_irq(bp->msix_table[offset].vector, bp->dev);
1514 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1515 bp->msix_table[offset].vector);
1516 offset++;
1517 }
55c11941
MS
1518
1519 if (CNIC_SUPPORT(bp)) {
1520 if (nvecs == offset)
1521 return;
1522 offset++;
1523 }
ca92429f 1524
ec6ba945 1525 for_each_eth_queue(bp, i) {
ca92429f
DK
1526 if (nvecs == offset)
1527 return;
51c1a580
MS
1528 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1529 i, bp->msix_table[offset].vector);
9f6c9258 1530
ca92429f 1531 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1532 }
1533}
1534
d6214d7a 1535void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1536{
30a5de77 1537 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1538 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1539 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1540
1541 /* vfs don't have a default status block */
1542 if (IS_PF(bp))
1543 nvecs++;
1544
1545 bnx2x_free_msix_irqs(bp, nvecs);
1546 } else {
30a5de77 1547 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1548 }
9f6c9258
DK
1549}
1550
0e8d2ec5 1551int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1552{
1ab4434c 1553 int msix_vec = 0, i, rc;
9f6c9258 1554
1ab4434c
AE
1555 /* VFs don't have a default status block */
1556 if (IS_PF(bp)) {
1557 bp->msix_table[msix_vec].entry = msix_vec;
1558 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1559 bp->msix_table[0].entry);
1560 msix_vec++;
1561 }
9f6c9258 1562
55c11941
MS
1563 /* Cnic requires an msix vector for itself */
1564 if (CNIC_SUPPORT(bp)) {
1565 bp->msix_table[msix_vec].entry = msix_vec;
1566 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1567 msix_vec, bp->msix_table[msix_vec].entry);
1568 msix_vec++;
1569 }
1570
6383c0b3 1571 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1572 for_each_eth_queue(bp, i) {
d6214d7a 1573 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1574 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1575 msix_vec, msix_vec, i);
d6214d7a 1576 msix_vec++;
9f6c9258
DK
1577 }
1578
1ab4434c
AE
1579 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1580 msix_vec);
d6214d7a 1581
1ab4434c 1582 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1583
1584 /*
1585 * reconfigure number of tx/rx queues according to available
1586 * MSI-X vectors
1587 */
55c11941 1588 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
d6214d7a 1589 /* how less vectors we will have? */
1ab4434c 1590 int diff = msix_vec - rc;
9f6c9258 1591
51c1a580 1592 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1593
1594 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1595
1596 if (rc) {
30a5de77
DK
1597 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1598 goto no_msix;
9f6c9258 1599 }
d6214d7a
DK
1600 /*
1601 * decrease number of queues by number of unallocated entries
1602 */
55c11941
MS
1603 bp->num_ethernet_queues -= diff;
1604 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1605
51c1a580 1606 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1607 bp->num_queues);
1608 } else if (rc > 0) {
1609 /* Get by with single vector */
1610 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1611 if (rc) {
1612 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1613 rc);
1614 goto no_msix;
1615 }
1616
1617 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1618 bp->flags |= USING_SINGLE_MSIX_FLAG;
1619
55c11941
MS
1620 BNX2X_DEV_INFO("set number of queues to 1\n");
1621 bp->num_ethernet_queues = 1;
1622 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1623 } else if (rc < 0) {
51c1a580 1624 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1625 goto no_msix;
9f6c9258
DK
1626 }
1627
1628 bp->flags |= USING_MSIX_FLAG;
1629
1630 return 0;
30a5de77
DK
1631
1632no_msix:
1633 /* fall to INTx if not enough memory */
1634 if (rc == -ENOMEM)
1635 bp->flags |= DISABLE_MSI_FLAG;
1636
1637 return rc;
9f6c9258
DK
1638}
1639
1640static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1641{
ca92429f 1642 int i, rc, offset = 0;
9f6c9258 1643
ad5afc89
AE
1644 /* no default status block for vf */
1645 if (IS_PF(bp)) {
1646 rc = request_irq(bp->msix_table[offset++].vector,
1647 bnx2x_msix_sp_int, 0,
1648 bp->dev->name, bp->dev);
1649 if (rc) {
1650 BNX2X_ERR("request sp irq failed\n");
1651 return -EBUSY;
1652 }
9f6c9258
DK
1653 }
1654
55c11941
MS
1655 if (CNIC_SUPPORT(bp))
1656 offset++;
1657
ec6ba945 1658 for_each_eth_queue(bp, i) {
9f6c9258
DK
1659 struct bnx2x_fastpath *fp = &bp->fp[i];
1660 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1661 bp->dev->name, i);
1662
d6214d7a 1663 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1664 bnx2x_msix_fp_int, 0, fp->name, fp);
1665 if (rc) {
ca92429f
DK
1666 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1667 bp->msix_table[offset].vector, rc);
1668 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1669 return -EBUSY;
1670 }
1671
d6214d7a 1672 offset++;
9f6c9258
DK
1673 }
1674
ec6ba945 1675 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1676 if (IS_PF(bp)) {
1677 offset = 1 + CNIC_SUPPORT(bp);
1678 netdev_info(bp->dev,
1679 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1680 bp->msix_table[0].vector,
1681 0, bp->msix_table[offset].vector,
1682 i - 1, bp->msix_table[offset + i - 1].vector);
1683 } else {
1684 offset = CNIC_SUPPORT(bp);
1685 netdev_info(bp->dev,
1686 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1687 0, bp->msix_table[offset].vector,
1688 i - 1, bp->msix_table[offset + i - 1].vector);
1689 }
9f6c9258
DK
1690 return 0;
1691}
1692
d6214d7a 1693int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1694{
1695 int rc;
1696
1697 rc = pci_enable_msi(bp->pdev);
1698 if (rc) {
51c1a580 1699 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1700 return -1;
1701 }
1702 bp->flags |= USING_MSI_FLAG;
1703
1704 return 0;
1705}
1706
1707static int bnx2x_req_irq(struct bnx2x *bp)
1708{
1709 unsigned long flags;
30a5de77 1710 unsigned int irq;
9f6c9258 1711
30a5de77 1712 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1713 flags = 0;
1714 else
1715 flags = IRQF_SHARED;
1716
30a5de77
DK
1717 if (bp->flags & USING_MSIX_FLAG)
1718 irq = bp->msix_table[0].vector;
1719 else
1720 irq = bp->pdev->irq;
1721
1722 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1723}
1724
c957d09f 1725static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1726{
1727 int rc = 0;
30a5de77
DK
1728 if (bp->flags & USING_MSIX_FLAG &&
1729 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1730 rc = bnx2x_req_msix_irqs(bp);
1731 if (rc)
1732 return rc;
1733 } else {
619c5cb6
VZ
1734 rc = bnx2x_req_irq(bp);
1735 if (rc) {
1736 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1737 return rc;
1738 }
1739 if (bp->flags & USING_MSI_FLAG) {
1740 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1741 netdev_info(bp->dev, "using MSI IRQ %d\n",
1742 bp->dev->irq);
1743 }
1744 if (bp->flags & USING_MSIX_FLAG) {
1745 bp->dev->irq = bp->msix_table[0].vector;
1746 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1747 bp->dev->irq);
619c5cb6
VZ
1748 }
1749 }
1750
1751 return 0;
1752}
1753
55c11941
MS
1754static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1755{
1756 int i;
1757
8f20aa57
DK
1758 for_each_rx_queue_cnic(bp, i) {
1759 bnx2x_fp_init_lock(&bp->fp[i]);
55c11941 1760 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1761 }
55c11941
MS
1762}
1763
1191cb83 1764static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1765{
1766 int i;
1767
8f20aa57
DK
1768 for_each_eth_queue(bp, i) {
1769 bnx2x_fp_init_lock(&bp->fp[i]);
9f6c9258 1770 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1771 }
9f6c9258
DK
1772}
1773
55c11941
MS
1774static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1775{
1776 int i;
1777
8f20aa57
DK
1778 local_bh_disable();
1779 for_each_rx_queue_cnic(bp, i) {
55c11941 1780 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1781 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1782 mdelay(1);
1783 }
1784 local_bh_enable();
55c11941
MS
1785}
1786
1191cb83 1787static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1788{
1789 int i;
1790
8f20aa57
DK
1791 local_bh_disable();
1792 for_each_eth_queue(bp, i) {
9f6c9258 1793 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1794 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1795 mdelay(1);
1796 }
1797 local_bh_enable();
9f6c9258
DK
1798}
1799
1800void bnx2x_netif_start(struct bnx2x *bp)
1801{
4b7ed897
DK
1802 if (netif_running(bp->dev)) {
1803 bnx2x_napi_enable(bp);
55c11941
MS
1804 if (CNIC_LOADED(bp))
1805 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1806 bnx2x_int_enable(bp);
1807 if (bp->state == BNX2X_STATE_OPEN)
1808 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1809 }
1810}
1811
1812void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1813{
1814 bnx2x_int_disable_sync(bp, disable_hw);
1815 bnx2x_napi_disable(bp);
55c11941
MS
1816 if (CNIC_LOADED(bp))
1817 bnx2x_napi_disable_cnic(bp);
9f6c9258 1818}
9f6c9258 1819
8307fa3e
VZ
1820u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1821{
8307fa3e 1822 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1823
55c11941 1824 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1825 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1826 u16 ether_type = ntohs(hdr->h_proto);
1827
1828 /* Skip VLAN tag if present */
1829 if (ether_type == ETH_P_8021Q) {
1830 struct vlan_ethhdr *vhdr =
1831 (struct vlan_ethhdr *)skb->data;
1832
1833 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1834 }
1835
1836 /* If ethertype is FCoE or FIP - use FCoE ring */
1837 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1838 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1839 }
55c11941 1840
cdb9d6ae 1841 /* select a non-FCoE queue */
ada7c19e 1842 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1843}
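/* Editor's note (illustrative): with FCoE loaded, an ETH_P_FCOE or ETH_P_FIP
 * frame (even behind a VLAN tag) is steered to the dedicated FCoE Tx ring via
 * bnx2x_fcoe_tx(); any other frame is picked by __netdev_pick_tx() and folded
 * into the ETH queue range by the modulo above. For example, with a
 * hypothetical 8 ETH queues, non-FCoE traffic lands on queues 0..7.
 */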
1844
d6214d7a
DK
1845void bnx2x_set_num_queues(struct bnx2x *bp)
1846{
96305234 1847 /* RSS queues */
55c11941 1848 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1849
a3348722
BW
1850 /* override in STORAGE SD modes */
1851 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1852 bp->num_ethernet_queues = 1;
1853
ec6ba945 1854 /* Add special queues */
55c11941
MS
1855 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1856 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1857
1858 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1859}
1860
cdb9d6ae
VZ
1861/**
1862 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1863 *
1864 * @bp: Driver handle
1865 *
1866 * We currently support at most 16 Tx queues for each CoS, thus we will
1867 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1868 * bp->max_cos.
1869 *
1870 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1871 * index after all ETH L2 indices.
1872 *
1873 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1874 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1875 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1876 *
1877 * The proper configuration of skb->queue_mapping is handled by
1878 * bnx2x_select_queue() and __skb_tx_hash().
1879 *
1880 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1881 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1882 */
55c11941 1883static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1884{
6383c0b3 1885 int rc, tx, rx;
ec6ba945 1886
65565884 1887 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1888 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1889
6383c0b3 1890/* account for fcoe queue */
55c11941
MS
1891 if (include_cnic && !NO_FCOE(bp)) {
1892 rx++;
1893 tx++;
6383c0b3 1894 }
6383c0b3
AE
1895
1896 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1897 if (rc) {
1898 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1899 return rc;
1900 }
1901 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1902 if (rc) {
1903 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1904 return rc;
1905 }
1906
51c1a580 1907 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1908 tx, rx);
1909
ec6ba945
VZ
1910 return rc;
1911}
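/* Editor's note (illustrative): a worked example of the arithmetic above,
 * using the hypothetical values BNX2X_NUM_ETH_QUEUES(bp) == 8 and
 * bp->max_cos == 3:
 *
 *	tx = 8 * 3 = 24, rx = 8
 *	with include_cnic and an FCoE ring: tx = 25, rx = 9
 *
 * and these are the values programmed into netdev->real_num_{tx,rx}_queues.
 */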
1912
1191cb83 1913static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1914{
1915 int i;
1916
1917 for_each_queue(bp, i) {
1918 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1919 u32 mtu;
a8c94b91
VZ
1920
1921 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1922 if (IS_FCOE_IDX(i))
1923 /*
1924 * Although no IP frames are expected to arrive on
1925 * this ring, we still want to add an
1926 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1927 * overrun attack.
1928 */
e52fcb24 1929 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1930 else
e52fcb24
ED
1931 mtu = bp->dev->mtu;
1932 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1933 IP_HEADER_ALIGNMENT_PADDING +
1934 ETH_OVREHEAD +
1935 mtu +
1936 BNX2X_FW_RX_ALIGN_END;
16a5fd92 1937 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1938 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1939 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1940 else
1941 fp->rx_frag_size = 0;
a8c94b91
VZ
1942 }
1943}
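/* Editor's note (illustrative): for a standard 1500 byte MTU the sum above is
 * BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING + ETH_OVREHEAD + 1500
 * + BNX2X_FW_RX_ALIGN_END, which together with NET_SKB_PAD fits a typical
 * 4 KiB page, so rx_frag_size is set and the ring is backed by page
 * fragments; for jumbo MTUs the sum exceeds PAGE_SIZE, rx_frag_size stays 0
 * and page fragments are not used.
 */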
1944
1191cb83 1945static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1946{
1947 int i;
619c5cb6
VZ
1948 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1949
16a5fd92 1950 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
1951 * enabled
1952 */
5d317c6a
MS
1953 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1954 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1955 bp->fp->cl_id +
1956 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1957
1958 /*
1959 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1960 * per-port, so if explicit configuration is needed, do it only
1961 * for a PMF.
1962 *
1963 * For 57712 and newer on the other hand it's a per-function
1964 * configuration.
1965 */
5d317c6a 1966 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1967}
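/* Editor's note (illustrative): ethtool_rxfh_indir_default(i, n) is simply
 * i % n, so with a hypothetical 4 ETH queues and bp->fp->cl_id == 16 the
 * initial indirection table above becomes 16, 17, 18, 19, 16, 17, ...
 * repeated over its whole length.
 */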
1968
96305234 1969int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
5d317c6a 1970 bool config_hash)
619c5cb6 1971{
3b603066 1972 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1973
1974 /* Although RSS is meaningless when there is a single HW queue we
1975 * still need it enabled in order to have HW Rx hash generated.
1976 *
1977 * if (!is_eth_multi(bp))
1978 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1979 */
1980
96305234 1981 params.rss_obj = rss_obj;
619c5cb6
VZ
1982
1983 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1984
96305234 1985 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1986
96305234
DK
1987 /* RSS configuration */
1988 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1989 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1990 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1991 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
5d317c6a
MS
1992 if (rss_obj->udp_rss_v4)
1993 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1994 if (rss_obj->udp_rss_v6)
1995 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
619c5cb6 1996
96305234
DK
1997 /* Hash bits */
1998 params.rss_result_mask = MULTI_MASK;
619c5cb6 1999
5d317c6a 2000 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2001
96305234
DK
2002 if (config_hash) {
2003 /* RSS keys */
8376d0bc 2004 prandom_bytes(params.rss_key, sizeof(params.rss_key));
96305234 2005 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2006 }
2007
2008 return bnx2x_config_rss(bp, &params);
2009}
2010
1191cb83 2011static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2012{
3b603066 2013 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2014
2015 /* Prepare parameters for function state transitions */
2016 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2017
2018 func_params.f_obj = &bp->func_obj;
2019 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2020
2021 func_params.params.hw_init.load_phase = load_code;
2022
2023 return bnx2x_func_state_change(bp, &func_params);
2024}
2025
2026/*
2027 * Cleans the objects that have internal lists without sending
16a5fd92 2028 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2029 */
7fa6f340 2030void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2031{
2032 int rc;
2033 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2034 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2035 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2036
2037 /***************** Cleanup MACs' object first *************************/
2038
2039 /* Wait for completion of the requested commands */
2040 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2041 /* Perform a dry cleanup */
2042 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2043
2044 /* Clean ETH primary MAC */
2045 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2046 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2047 &ramrod_flags);
2048 if (rc != 0)
2049 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2050
2051 /* Cleanup UC list */
2052 vlan_mac_flags = 0;
2053 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2054 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2055 &ramrod_flags);
2056 if (rc != 0)
2057 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2058
2059 /***************** Now clean mcast object *****************************/
2060 rparam.mcast_obj = &bp->mcast_obj;
2061 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2062
2063 /* Add a DEL command... */
2064 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2065 if (rc < 0)
51c1a580
MS
2066 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2067 rc);
619c5cb6
VZ
2068
2069 /* ...and wait until all pending commands are cleared */
2070 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2071 while (rc != 0) {
2072 if (rc < 0) {
2073 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2074 rc);
2075 return;
2076 }
2077
2078 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2079 }
2080}
2081
2082#ifndef BNX2X_STOP_ON_ERROR
2083#define LOAD_ERROR_EXIT(bp, label) \
2084 do { \
2085 (bp)->state = BNX2X_STATE_ERROR; \
2086 goto label; \
2087 } while (0)
55c11941
MS
2088
2089#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2090 do { \
2091 bp->cnic_loaded = false; \
2092 goto label; \
2093 } while (0)
2094#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2095#define LOAD_ERROR_EXIT(bp, label) \
2096 do { \
2097 (bp)->state = BNX2X_STATE_ERROR; \
2098 (bp)->panic = 1; \
2099 return -EBUSY; \
2100 } while (0)
55c11941
MS
2101#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2102 do { \
2103 bp->cnic_loaded = false; \
2104 (bp)->panic = 1; \
2105 return -EBUSY; \
2106 } while (0)
2107#endif /*BNX2X_STOP_ON_ERROR*/
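/* Editor's note (illustrative): the macros above are used throughout the load
 * paths below in the pattern
 *
 *	rc = bnx2x_alloc_fp_mem(bp);
 *	if (rc) {
 *		BNX2X_ERR("Unable to allocate memory for fps\n");
 *		LOAD_ERROR_EXIT(bp, load_error0);
 *	}
 *
 * so a production build unwinds through the load_error* labels, while a
 * BNX2X_STOP_ON_ERROR build marks the error state (and panic flag) and
 * returns -EBUSY on the spot.
 */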
619c5cb6 2108
ad5afc89
AE
2109static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2110{
2111 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2112 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2113 return;
2114}
2115
2116static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2117{
8db573ba 2118 int num_groups, vf_headroom = 0;
ad5afc89 2119 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2120
ad5afc89
AE
2121 /* number of queues for statistics is number of eth queues + FCoE */
2122 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2123
ad5afc89
AE
2124 /* Total number of FW statistics requests =
2125 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2126 * and fcoe l2 queue) stats + num of queues (which includes another 1
2127 * for fcoe l2 queue if applicable)
2128 */
2129 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2130
8db573ba
AE
2131 /* vf stats appear in the request list, but their data is allocated by
2132 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2133 * it is used to determine where to place the vf stats queries in the
2134 * request struct
2135 */
2136 if (IS_SRIOV(bp))
6411280a 2137 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2138
ad5afc89
AE
2139 /* Request is built from stats_query_header and an array of
2140 * stats_query_cmd_group each of which contains
2141 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2142 * configured in the stats_query_header.
2143 */
2144 num_groups =
8db573ba
AE
2145 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2146 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2147 1 : 0));
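	/* Editor's note (illustrative): the expression above is a plain
	 * round-up division of (fw_stats_num + vf_headroom) by
	 * STATS_QUERY_CMD_COUNT; e.g. with hypothetical values of 10
	 * requests, no VF headroom and a group size of 16 it yields one
	 * group, while 20 requests would need two.
	 */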
2148
8db573ba
AE
2149 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2150 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2151 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2152 num_groups * sizeof(struct stats_query_cmd_group);
2153
2154 /* Data for statistics requests + stats_counter
2155 * stats_counter holds per-STORM counters that are incremented
2156 * when STORM has finished with the current request.
2157 * memory for FCoE offloaded statistics is counted anyway,
2158 * even if they will not be sent.
2159 * VF stats are not accounted for here as the data of VF stats is stored
2160 * in memory allocated by the VF, not here.
2161 */
2162 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2163 sizeof(struct per_pf_stats) +
2164 sizeof(struct fcoe_statistics_params) +
2165 sizeof(struct per_queue_stats) * num_queue_stats +
2166 sizeof(struct stats_counter);
2167
2168 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2169 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2170
2171 /* Set shortcuts */
2172 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2173 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2174 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2175 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2176 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2177 bp->fw_stats_req_sz;
2178
6bf07b8e 2179 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2180 U64_HI(bp->fw_stats_req_mapping),
2181 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2182 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2183 U64_HI(bp->fw_stats_data_mapping),
2184 U64_LO(bp->fw_stats_data_mapping));
2185 return 0;
2186
2187alloc_mem_err:
2188 bnx2x_free_fw_stats_mem(bp);
2189 BNX2X_ERR("Can't allocate FW stats memory\n");
2190 return -ENOMEM;
2191}
2192
2193/* send load request to mcp and analyze response */
2194static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2195{
178135c1
DK
2196 u32 param;
2197
ad5afc89
AE
2198 /* init fw_seq */
2199 bp->fw_seq =
2200 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2201 DRV_MSG_SEQ_NUMBER_MASK);
2202 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2203
2204 /* Get current FW pulse sequence */
2205 bp->fw_drv_pulse_wr_seq =
2206 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2207 DRV_PULSE_SEQ_MASK);
2208 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2209
178135c1
DK
2210 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2211
2212 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2213 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2214
ad5afc89 2215 /* load request */
178135c1 2216 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2217
2218 /* if mcp fails to respond we must abort */
2219 if (!(*load_code)) {
2220 BNX2X_ERR("MCP response failure, aborting\n");
2221 return -EBUSY;
2222 }
2223
2224 /* If mcp refused (e.g. other port is in diagnostic mode) we
2225 * must abort
2226 */
2227 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2228 BNX2X_ERR("MCP refused load request, aborting\n");
2229 return -EBUSY;
2230 }
2231 return 0;
2232}
2233
2234/* check whether another PF has already loaded FW to chip. In
2235 * virtualized environments a pf from another VM may have already
2236 * initialized the device including loading FW
2237 */
2238int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2239{
2240 /* is another pf loaded on this engine? */
2241 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2242 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2243 /* build my FW version dword */
2244 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2245 (BCM_5710_FW_MINOR_VERSION << 8) +
2246 (BCM_5710_FW_REVISION_VERSION << 16) +
2247 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2248
2249 /* read loaded FW from chip */
2250 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2251
2252 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2253 loaded_fw, my_fw);
2254
2255 /* abort nic load if version mismatch */
2256 if (my_fw != loaded_fw) {
6bf07b8e 2257 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
452427b0 2258 loaded_fw, my_fw);
ad5afc89
AE
2259 return -EBUSY;
2260 }
2261 }
2262 return 0;
2263}
2264
2265/* returns the "mcp load_code" according to global load_count array */
2266static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2267{
2268 int path = BP_PATH(bp);
2269
2270 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2271 path, load_count[path][0], load_count[path][1],
2272 load_count[path][2]);
2273 load_count[path][0]++;
2274 load_count[path][1 + port]++;
2275 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2276 path, load_count[path][0], load_count[path][1],
2277 load_count[path][2]);
2278 if (load_count[path][0] == 1)
2279 return FW_MSG_CODE_DRV_LOAD_COMMON;
2280 else if (load_count[path][1 + port] == 1)
2281 return FW_MSG_CODE_DRV_LOAD_PORT;
2282 else
2283 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2284}
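/* Editor's note (illustrative): without an MCP the counters above emulate its
 * arbitration - the first function loaded on a path gets
 * FW_MSG_CODE_DRV_LOAD_COMMON (it performs the common HW init), the first one
 * on a given port gets ..._LOAD_PORT, and every later function on that port
 * gets ..._LOAD_FUNCTION.
 */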
2285
2286/* mark PMF if applicable */
2287static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2288{
2289 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2290 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2291 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2292 bp->port.pmf = 1;
2293 /* We need the barrier to ensure the ordering between the
2294 * writing to bp->port.pmf here and reading it from the
2295 * bnx2x_periodic_task().
2296 */
2297 smp_mb();
2298 } else {
2299 bp->port.pmf = 0;
452427b0
YM
2300 }
2301
ad5afc89
AE
2302 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2303}
2304
2305static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2306{
2307 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2308 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2309 (bp->common.shmem2_base)) {
2310 if (SHMEM2_HAS(bp, dcc_support))
2311 SHMEM2_WR(bp, dcc_support,
2312 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2313 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2314 if (SHMEM2_HAS(bp, afex_driver_support))
2315 SHMEM2_WR(bp, afex_driver_support,
2316 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2317 }
2318
2319 /* Set AFEX default VLAN tag to an invalid value */
2320 bp->afex_def_vlan_tag = -1;
452427b0
YM
2321}
2322
1191cb83
ED
2323/**
2324 * bnx2x_bz_fp - zero content of the fastpath structure.
2325 *
2326 * @bp: driver handle
2327 * @index: fastpath index to be zeroed
2328 *
2329 * Makes sure the contents of bp->fp[index].napi are kept
2330 * intact.
2331 */
2332static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2333{
2334 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2335 int cos;
1191cb83 2336 struct napi_struct orig_napi = fp->napi;
15192a8c 2337 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2338
1191cb83 2339 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2340 if (fp->tpa_info)
2341 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2342 sizeof(struct bnx2x_agg_info));
2343 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2344
2345 /* Restore the NAPI object as it has been already initialized */
2346 fp->napi = orig_napi;
15192a8c 2347 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2348 fp->bp = bp;
2349 fp->index = index;
2350 if (IS_ETH_FP(fp))
2351 fp->max_cos = bp->max_cos;
2352 else
2353 /* Special queues support only one CoS */
2354 fp->max_cos = 1;
2355
65565884 2356 /* Init txdata pointers */
65565884
MS
2357 if (IS_FCOE_FP(fp))
2358 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2359 if (IS_ETH_FP(fp))
2360 for_each_cos_in_tx_queue(fp, cos)
2361 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2362 BNX2X_NUM_ETH_QUEUES(bp) + index];
2363
16a5fd92 2364 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2365 * minimal size so it must be set prior to queue memory allocation
2366 */
2367 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2368 (bp->flags & GRO_ENABLE_FLAG &&
2369 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2370 if (bp->flags & TPA_ENABLE_FLAG)
2371 fp->mode = TPA_MODE_LRO;
2372 else if (bp->flags & GRO_ENABLE_FLAG)
2373 fp->mode = TPA_MODE_GRO;
2374
1191cb83
ED
2375 /* We don't want TPA on an FCoE L2 ring */
2376 if (IS_FCOE_FP(fp))
2377 fp->disable_tpa = 1;
55c11941
MS
2378}
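/* Editor's note (illustrative): the TPA selection above means a queue runs in
 * LRO mode when the LRO flag is set, in GRO mode when only the GRO flag is
 * set and the MTU permits it, and with aggregation disabled otherwise; the
 * FCoE L2 ring never aggregates regardless of the flags.
 */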
2379
2380int bnx2x_load_cnic(struct bnx2x *bp)
2381{
2382 int i, rc, port = BP_PORT(bp);
2383
2384 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2385
2386 mutex_init(&bp->cnic_mutex);
2387
ad5afc89
AE
2388 if (IS_PF(bp)) {
2389 rc = bnx2x_alloc_mem_cnic(bp);
2390 if (rc) {
2391 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2392 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2393 }
55c11941
MS
2394 }
2395
2396 rc = bnx2x_alloc_fp_mem_cnic(bp);
2397 if (rc) {
2398 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2399 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2400 }
2401
2402 /* Update the number of queues with the cnic queues */
2403 rc = bnx2x_set_real_num_queues(bp, 1);
2404 if (rc) {
2405 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2406 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2407 }
2408
2409 /* Add all CNIC NAPI objects */
2410 bnx2x_add_all_napi_cnic(bp);
2411 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2412 bnx2x_napi_enable_cnic(bp);
2413
2414 rc = bnx2x_init_hw_func_cnic(bp);
2415 if (rc)
2416 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2417
2418 bnx2x_nic_init_cnic(bp);
2419
ad5afc89
AE
2420 if (IS_PF(bp)) {
2421 /* Enable Timer scan */
2422 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2423
2424 /* setup cnic queues */
2425 for_each_cnic_queue(bp, i) {
2426 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2427 if (rc) {
2428 BNX2X_ERR("Queue setup failed\n");
2429 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2430 }
55c11941
MS
2431 }
2432 }
2433
2434 /* Initialize Rx filter. */
2435 netif_addr_lock_bh(bp->dev);
2436 bnx2x_set_rx_mode(bp->dev);
2437 netif_addr_unlock_bh(bp->dev);
2438
2439 /* re-read iscsi info */
2440 bnx2x_get_iscsi_info(bp);
2441 bnx2x_setup_cnic_irq_info(bp);
2442 bnx2x_setup_cnic_info(bp);
2443 bp->cnic_loaded = true;
2444 if (bp->state == BNX2X_STATE_OPEN)
2445 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2446
55c11941
MS
2447 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2448
2449 return 0;
2450
2451#ifndef BNX2X_STOP_ON_ERROR
2452load_error_cnic2:
2453 /* Disable Timer scan */
2454 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2455
2456load_error_cnic1:
2457 bnx2x_napi_disable_cnic(bp);
2458 /* Update the number of queues without the cnic queues */
2459 rc = bnx2x_set_real_num_queues(bp, 0);
2460 if (rc)
2461 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2462load_error_cnic0:
2463 BNX2X_ERR("CNIC-related load failed\n");
2464 bnx2x_free_fp_mem_cnic(bp);
2465 bnx2x_free_mem_cnic(bp);
2466 return rc;
2467#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2468}
2469
9f6c9258
DK
2470/* must be called with rtnl_lock */
2471int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2472{
619c5cb6 2473 int port = BP_PORT(bp);
ad5afc89 2474 int i, rc = 0, load_code = 0;
9f6c9258 2475
55c11941
MS
2476 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2477 DP(NETIF_MSG_IFUP,
2478 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2479
9f6c9258 2480#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2481 if (unlikely(bp->panic)) {
2482 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2483 return -EPERM;
51c1a580 2484 }
9f6c9258
DK
2485#endif
2486
2487 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2488
16a5fd92 2489 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2490 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2491 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2492 &bp->last_reported_link.link_report_flags);
2ae17f66 2493
ad5afc89
AE
2494 if (IS_PF(bp))
2495 /* must be called before memory allocation and HW init */
2496 bnx2x_ilt_set_info(bp);
523224a3 2497
6383c0b3
AE
2498 /*
2499 * Zero fastpath structures, preserving invariants that are allocated only
2500 * once: the napi struct, fp index, max_cos and the bp pointer.
65565884 2501 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2502 */
51c1a580 2503 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2504 for_each_queue(bp, i)
2505 bnx2x_bz_fp(bp, i);
55c11941
MS
2506 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2507 bp->num_cnic_queues) *
2508 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2509
55c11941 2510 bp->fcoe_init = false;
6383c0b3 2511
a8c94b91
VZ
2512 /* Set the receive queues buffer size */
2513 bnx2x_set_rx_buf_size(bp);
2514
ad5afc89
AE
2515 if (IS_PF(bp)) {
2516 rc = bnx2x_alloc_mem(bp);
2517 if (rc) {
2518 BNX2X_ERR("Unable to allocate bp memory\n");
2519 return rc;
2520 }
2521 }
2522
2523 /* Allocate memory for FW statistics */
2524 if (bnx2x_alloc_fw_stats_mem(bp))
2525 LOAD_ERROR_EXIT(bp, load_error0);
2526
2527 /* Needs to be done after alloc mem, since it's self-adjusting to the amount
2528 * of memory available for RSS queues
2529 */
2530 rc = bnx2x_alloc_fp_mem(bp);
2531 if (rc) {
2532 BNX2X_ERR("Unable to allocate memory for fps\n");
2533 LOAD_ERROR_EXIT(bp, load_error0);
2534 }
d6214d7a 2535
8d9ac297
AE
2536 /* request pf to initialize status blocks */
2537 if (IS_VF(bp)) {
2538 rc = bnx2x_vfpf_init(bp);
2539 if (rc)
2540 LOAD_ERROR_EXIT(bp, load_error0);
2541 }
2542
b3b83c3f
DK
2543 /* As long as bnx2x_alloc_mem() may update
2544 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2545 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2546 */
55c11941 2547 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2548 if (rc) {
ec6ba945 2549 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2550 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2551 }
2552
6383c0b3 2553 /* configure multi cos mappings in kernel.
16a5fd92
YM
2554 * this configuration may be overridden by a multi class queue
2555 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2556 */
2557 bnx2x_setup_tc(bp->dev, bp->max_cos);
2558
26614ba5
MS
2559 /* Add all NAPI objects */
2560 bnx2x_add_all_napi(bp);
55c11941 2561 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2562 bnx2x_napi_enable(bp);
2563
ad5afc89
AE
2564 if (IS_PF(bp)) {
2565 /* set pf load just before approaching the MCP */
2566 bnx2x_set_pf_load(bp);
2567
2568 /* if mcp exists send load request and analyze response */
2569 if (!BP_NOMCP(bp)) {
2570 /* attempt to load pf */
2571 rc = bnx2x_nic_load_request(bp, &load_code);
2572 if (rc)
2573 LOAD_ERROR_EXIT(bp, load_error1);
2574
2575 /* what did mcp say? */
2576 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2577 if (rc) {
2578 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2579 LOAD_ERROR_EXIT(bp, load_error2);
2580 }
ad5afc89
AE
2581 } else {
2582 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2583 }
9f6c9258 2584
ad5afc89
AE
2585 /* mark pmf if applicable */
2586 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2587
ad5afc89
AE
2588 /* Init Function state controlling object */
2589 bnx2x__init_func_obj(bp);
6383c0b3 2590
ad5afc89
AE
2591 /* Initialize HW */
2592 rc = bnx2x_init_hw(bp, load_code);
2593 if (rc) {
2594 BNX2X_ERR("HW init failed, aborting\n");
2595 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2596 LOAD_ERROR_EXIT(bp, load_error2);
2597 }
9f6c9258
DK
2598 }
2599
ecf01c22
YM
2600 bnx2x_pre_irq_nic_init(bp);
2601
d6214d7a
DK
2602 /* Connect to IRQs */
2603 rc = bnx2x_setup_irqs(bp);
523224a3 2604 if (rc) {
ad5afc89
AE
2605 BNX2X_ERR("setup irqs failed\n");
2606 if (IS_PF(bp))
2607 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2608 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2609 }
2610
619c5cb6 2611 /* Init per-function objects */
ad5afc89 2612 if (IS_PF(bp)) {
ecf01c22
YM
2613 /* Setup NIC internals and enable interrupts */
2614 bnx2x_post_irq_nic_init(bp, load_code);
2615
ad5afc89 2616 bnx2x_init_bp_objs(bp);
b56e9670 2617 bnx2x_iov_nic_init(bp);
a3348722 2618
ad5afc89
AE
2619 /* Set AFEX default VLAN tag to an invalid value */
2620 bp->afex_def_vlan_tag = -1;
2621 bnx2x_nic_load_afex_dcc(bp, load_code);
2622 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2623 rc = bnx2x_func_start(bp);
2624 if (rc) {
2625 BNX2X_ERR("Function start failed!\n");
2626 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2627
619c5cb6 2628 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2629 }
9f6c9258 2630
ad5afc89
AE
2631 /* Send LOAD_DONE command to MCP */
2632 if (!BP_NOMCP(bp)) {
2633 load_code = bnx2x_fw_command(bp,
2634 DRV_MSG_CODE_LOAD_DONE, 0);
2635 if (!load_code) {
2636 BNX2X_ERR("MCP response failure, aborting\n");
2637 rc = -EBUSY;
2638 LOAD_ERROR_EXIT(bp, load_error3);
2639 }
2640 }
9f6c9258 2641
0c14e5ce
AE
2642 /* initialize FW coalescing state machines in RAM */
2643 bnx2x_update_coalesce(bp);
2644
ad5afc89
AE
2645 /* setup the leading queue */
2646 rc = bnx2x_setup_leading(bp);
51c1a580 2647 if (rc) {
ad5afc89 2648 BNX2X_ERR("Setup leading failed!\n");
55c11941 2649 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2650 }
523224a3 2651
ad5afc89
AE
2652 /* set up the rest of the queues */
2653 for_each_nondefault_eth_queue(bp, i) {
2654 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2655 if (rc) {
2656 BNX2X_ERR("Queue setup failed\n");
2657 LOAD_ERROR_EXIT(bp, load_error3);
2658 }
2659 }
2660
2661 /* setup rss */
2662 rc = bnx2x_init_rss_pf(bp);
2663 if (rc) {
2664 BNX2X_ERR("PF RSS init failed\n");
2665 LOAD_ERROR_EXIT(bp, load_error3);
2666 }
8d9ac297
AE
2667
2668 } else { /* vf */
2669 for_each_eth_queue(bp, i) {
2670 rc = bnx2x_vfpf_setup_q(bp, i);
2671 if (rc) {
2672 BNX2X_ERR("Queue setup failed\n");
2673 LOAD_ERROR_EXIT(bp, load_error3);
2674 }
2675 }
51c1a580 2676 }
619c5cb6 2677
523224a3
DK
2678 /* Now when Clients are configured we are ready to work */
2679 bp->state = BNX2X_STATE_OPEN;
2680
619c5cb6 2681 /* Configure a ucast MAC */
ad5afc89
AE
2682 if (IS_PF(bp))
2683 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2684 else /* vf */
f8f4f61a
DK
2685 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2686 true);
51c1a580
MS
2687 if (rc) {
2688 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2689 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2690 }
6e30dd4e 2691
ad5afc89 2692 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2693 bnx2x_update_max_mf_config(bp, bp->pending_max);
2694 bp->pending_max = 0;
2695 }
2696
ad5afc89
AE
2697 if (bp->port.pmf) {
2698 rc = bnx2x_initial_phy_init(bp, load_mode);
2699 if (rc)
2700 LOAD_ERROR_EXIT(bp, load_error3);
2701 }
c63da990 2702 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2703
619c5cb6
VZ
2704 /* Start fast path */
2705
2706 /* Initialize Rx filter. */
2707 netif_addr_lock_bh(bp->dev);
6e30dd4e 2708 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2709 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2710
619c5cb6 2711 /* Start the Tx */
9f6c9258
DK
2712 switch (load_mode) {
2713 case LOAD_NORMAL:
16a5fd92 2714 /* Tx queue should be only re-enabled */
523224a3 2715 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2716 break;
2717
2718 case LOAD_OPEN:
2719 netif_tx_start_all_queues(bp->dev);
523224a3 2720 smp_mb__after_clear_bit();
9f6c9258
DK
2721 break;
2722
2723 case LOAD_DIAG:
8970b2e4 2724 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2725 bp->state = BNX2X_STATE_DIAG;
2726 break;
2727
2728 default:
2729 break;
2730 }
2731
00253a8c 2732 if (bp->port.pmf)
4c704899 2733 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2734 else
9f6c9258
DK
2735 bnx2x__link_status_update(bp);
2736
2737 /* start the timer */
2738 mod_timer(&bp->timer, jiffies + bp->current_interval);
2739
55c11941
MS
2740 if (CNIC_ENABLED(bp))
2741 bnx2x_load_cnic(bp);
9f6c9258 2742
ad5afc89
AE
2743 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2744 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2745 u32 val;
2746 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2747 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2748 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2749 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2750 }
2751
619c5cb6 2752 /* Wait for all pending SP commands to complete */
ad5afc89 2753 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2754 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2755 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2756 return -EBUSY;
2757 }
6891dd25 2758
9876879f
BW
2759 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2760 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2761 bnx2x_dcbx_init(bp, false);
2762
55c11941
MS
2763 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2764
9f6c9258
DK
2765 return 0;
2766
619c5cb6 2767#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2768load_error3:
ad5afc89
AE
2769 if (IS_PF(bp)) {
2770 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2771
ad5afc89
AE
2772 /* Clean queueable objects */
2773 bnx2x_squeeze_objects(bp);
2774 }
619c5cb6 2775
9f6c9258
DK
2776 /* Free SKBs, SGEs, TPA pool and driver internals */
2777 bnx2x_free_skbs(bp);
ec6ba945 2778 for_each_rx_queue(bp, i)
9f6c9258 2779 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2780
9f6c9258 2781 /* Release IRQs */
d6214d7a
DK
2782 bnx2x_free_irq(bp);
2783load_error2:
ad5afc89 2784 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2785 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2786 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2787 }
2788
2789 bp->port.pmf = 0;
9f6c9258
DK
2790load_error1:
2791 bnx2x_napi_disable(bp);
722c6f58 2792 bnx2x_del_all_napi(bp);
ad5afc89 2793
889b9af3 2794 /* clear pf_load status, as it was already set */
ad5afc89
AE
2795 if (IS_PF(bp))
2796 bnx2x_clear_pf_load(bp);
d6214d7a 2797load_error0:
ad5afc89
AE
2798 bnx2x_free_fp_mem(bp);
2799 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2800 bnx2x_free_mem(bp);
2801
2802 return rc;
619c5cb6 2803#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2804}
2805
7fa6f340 2806int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2807{
2808 u8 rc = 0, cos, i;
2809
2810 /* Wait until tx fastpath tasks complete */
2811 for_each_tx_queue(bp, i) {
2812 struct bnx2x_fastpath *fp = &bp->fp[i];
2813
2814 for_each_cos_in_tx_queue(fp, cos)
2815 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2816 if (rc)
2817 return rc;
2818 }
2819 return 0;
2820}
2821
9f6c9258 2822/* must be called with rtnl_lock */
5d07d868 2823int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2824{
2825 int i;
c9ee9206
VZ
2826 bool global = false;
2827
55c11941
MS
2828 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2829
9ce392d4 2830 /* mark driver is unloaded in shmem2 */
ad5afc89 2831 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2832 u32 val;
2833 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2834 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2835 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2836 }
2837
80bfe5cc 2838 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2839 (bp->state == BNX2X_STATE_CLOSED ||
2840 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2841 /* We can get here if the driver has been unloaded
2842 * during parity error recovery and is either waiting for a
2843 * leader to complete or for other functions to unload and
2844 * then ifdown has been issued. In this case we want to
2845 * unload and let other functions complete a recovery
2846 * process.
2847 */
9f6c9258
DK
2848 bp->recovery_state = BNX2X_RECOVERY_DONE;
2849 bp->is_leader = 0;
c9ee9206
VZ
2850 bnx2x_release_leader_lock(bp);
2851 smp_mb();
2852
51c1a580
MS
2853 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2854 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2855 return -EINVAL;
2856 }
2857
80bfe5cc 2858 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2859 * has not completed successfully - all resources are released.
80bfe5cc
YM
2860 *
2861 * we can get here only after an unsuccessful ndo_* callback, during which
2862 * dev->IFF_UP flag is still on.
2863 */
2864 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2865 return 0;
2866
2867 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2868 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2869 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2870 */
2871 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2872 smp_mb();
2873
78c3bcc5
AE
2874 /* indicate to VFs that the PF is going down */
2875 bnx2x_iov_channel_down(bp);
2876
55c11941
MS
2877 if (CNIC_LOADED(bp))
2878 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2879
9505ee37
VZ
2880 /* Stop Tx */
2881 bnx2x_tx_disable(bp);
65565884 2882 netdev_reset_tc(bp->dev);
9505ee37 2883
9f6c9258 2884 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2885
9f6c9258 2886 del_timer_sync(&bp->timer);
f85582f8 2887
ad5afc89
AE
2888 if (IS_PF(bp)) {
2889 /* Set ALWAYS_ALIVE bit in shmem */
2890 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2891 bnx2x_drv_pulse(bp);
2892 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2893 bnx2x_save_statistics(bp);
2894 }
9f6c9258 2895
ad5afc89
AE
2896 /* wait till consumers catch up with producers in all queues */
2897 bnx2x_drain_tx_queues(bp);
9f6c9258 2898
9b176b6b
AE
2899 /* if VF, indicate to PF that this function is going down (PF will delete sp
2900 * elements and clear initializations)
2901 */
2902 if (IS_VF(bp))
2903 bnx2x_vfpf_close_vf(bp);
2904 else if (unload_mode != UNLOAD_RECOVERY)
2905 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2906 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2907 else {
c9ee9206
VZ
2908 /* Send the UNLOAD_REQUEST to the MCP */
2909 bnx2x_send_unload_req(bp, unload_mode);
2910
16a5fd92 2911 /* Prevent transactions to host from the functions on the
c9ee9206 2912 * engine that doesn't reset global blocks in case of global
16a5fd92 2913 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
2914 * (the engine whose leader will perform the recovery
2915 * last).
2916 */
2917 if (!CHIP_IS_E1x(bp))
2918 bnx2x_pf_disable(bp);
2919
2920 /* Disable HW interrupts, NAPI */
523224a3 2921 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2922 /* Delete all NAPI objects */
2923 bnx2x_del_all_napi(bp);
55c11941
MS
2924 if (CNIC_LOADED(bp))
2925 bnx2x_del_all_napi_cnic(bp);
523224a3 2926 /* Release IRQs */
d6214d7a 2927 bnx2x_free_irq(bp);
c9ee9206
VZ
2928
2929 /* Report UNLOAD_DONE to MCP */
5d07d868 2930 bnx2x_send_unload_done(bp, false);
523224a3 2931 }
9f6c9258 2932
619c5cb6 2933 /*
16a5fd92 2934 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
2935 * the queueable objects here in case they failed to get cleaned so far.
2936 */
ad5afc89
AE
2937 if (IS_PF(bp))
2938 bnx2x_squeeze_objects(bp);
619c5cb6 2939
79616895
VZ
2940 /* There should be no more pending SP commands at this stage */
2941 bp->sp_state = 0;
2942
9f6c9258
DK
2943 bp->port.pmf = 0;
2944
2945 /* Free SKBs, SGEs, TPA pool and driver internals */
2946 bnx2x_free_skbs(bp);
55c11941
MS
2947 if (CNIC_LOADED(bp))
2948 bnx2x_free_skbs_cnic(bp);
ec6ba945 2949 for_each_rx_queue(bp, i)
9f6c9258 2950 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2951
ad5afc89
AE
2952 bnx2x_free_fp_mem(bp);
2953 if (CNIC_LOADED(bp))
55c11941 2954 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2955
ad5afc89 2956 if (IS_PF(bp)) {
ad5afc89
AE
2957 if (CNIC_LOADED(bp))
2958 bnx2x_free_mem_cnic(bp);
2f7a3122 2959 bnx2x_free_mem(bp);
ad5afc89 2960 }
9f6c9258 2961 bp->state = BNX2X_STATE_CLOSED;
55c11941 2962 bp->cnic_loaded = false;
9f6c9258 2963
c9ee9206
VZ
2964 /* Check if there are pending parity attentions. If there are - set
2965 * RECOVERY_IN_PROGRESS.
2966 */
ad5afc89 2967 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2968 bnx2x_set_reset_in_progress(bp);
2969
2970 /* Set RESET_IS_GLOBAL if needed */
2971 if (global)
2972 bnx2x_set_reset_global(bp);
2973 }
2974
9f6c9258
DK
2975 /* The last driver must disable a "close the gate" if there is no
2976 * parity attention or "process kill" pending.
2977 */
ad5afc89
AE
2978 if (IS_PF(bp) &&
2979 !bnx2x_clear_pf_load(bp) &&
2980 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2981 bnx2x_disable_close_the_gate(bp);
2982
55c11941
MS
2983 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2984
9f6c9258
DK
2985 return 0;
2986}
f85582f8 2987
9f6c9258
DK
2988int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2989{
2990 u16 pmcsr;
2991
adf5f6a1
DK
2992 /* If there is no power capability, silently succeed */
2993 if (!bp->pm_cap) {
51c1a580 2994 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2995 return 0;
2996 }
2997
9f6c9258
DK
2998 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2999
3000 switch (state) {
3001 case PCI_D0:
3002 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3003 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3004 PCI_PM_CTRL_PME_STATUS));
3005
3006 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3007 /* delay required during transition out of D3hot */
3008 msleep(20);
3009 break;
3010
3011 case PCI_D3hot:
3012 /* If there are other clients above, don't
3013 shut down the power */
3014 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3015 return 0;
3016 /* Don't shut down the power for emulation and FPGA */
3017 if (CHIP_REV_IS_SLOW(bp))
3018 return 0;
3019
3020 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3021 pmcsr |= 3;
3022
3023 if (bp->wol)
3024 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3025
3026 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3027 pmcsr);
3028
3029 /* No more memory access after this point until
3030 * device is brought back to D0.
3031 */
3032 break;
3033
3034 default:
51c1a580 3035 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3036 return -EINVAL;
3037 }
3038 return 0;
3039}
3040
9f6c9258
DK
3041/*
3042 * net_device service functions
3043 */
d6214d7a 3044int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3045{
3046 int work_done = 0;
6383c0b3 3047 u8 cos;
9f6c9258
DK
3048 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3049 napi);
3050 struct bnx2x *bp = fp->bp;
3051
3052 while (1) {
3053#ifdef BNX2X_STOP_ON_ERROR
3054 if (unlikely(bp->panic)) {
3055 napi_complete(napi);
3056 return 0;
3057 }
3058#endif
8f20aa57
DK
3059 if (!bnx2x_fp_lock_napi(fp))
3060 return work_done;
9f6c9258 3061
6383c0b3 3062 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3063 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3064 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3065
9f6c9258
DK
3066 if (bnx2x_has_rx_work(fp)) {
3067 work_done += bnx2x_rx_int(fp, budget - work_done);
3068
3069 /* must not complete if we consumed full budget */
8f20aa57
DK
3070 if (work_done >= budget) {
3071 bnx2x_fp_unlock_napi(fp);
9f6c9258 3072 break;
8f20aa57 3073 }
9f6c9258
DK
3074 }
3075
3076 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3077 if (!bnx2x_fp_unlock_napi(fp) &&
3078 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3079
ec6ba945
VZ
3080 /* No need to update SB for FCoE L2 ring as long as
3081 * it's connected to the default SB and the SB
3082 * has been updated when NAPI was scheduled.
3083 */
3084 if (IS_FCOE_FP(fp)) {
3085 napi_complete(napi);
3086 break;
3087 }
9f6c9258 3088 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3089 /* bnx2x_has_rx_work() reads the status block,
3090 * thus we need to ensure that status block indices
3091 * have been actually read (bnx2x_update_fpsb_idx)
3092 * prior to this check (bnx2x_has_rx_work) so that
3093 * we won't write the "newer" value of the status block
3094 * to IGU (if there was a DMA right after
3095 * bnx2x_has_rx_work and if there is no rmb, the memory
3096 * reading (bnx2x_update_fpsb_idx) may be postponed
3097 * to right before bnx2x_ack_sb). In this case there
3098 * will never be another interrupt until there is
3099 * another update of the status block, while there
3100 * is still unhandled work.
3101 */
9f6c9258
DK
3102 rmb();
3103
3104 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3105 napi_complete(napi);
3106 /* Re-enable interrupts */
51c1a580 3107 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3108 "Update index to %d\n", fp->fp_hc_idx);
3109 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3110 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3111 IGU_INT_ENABLE, 1);
3112 break;
3113 }
3114 }
3115 }
3116
3117 return work_done;
3118}
3119
8f20aa57
DK
3120#ifdef CONFIG_NET_LL_RX_POLL
3121/* must be called with local_bh_disable()d */
3122int bnx2x_low_latency_recv(struct napi_struct *napi)
3123{
3124 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3125 napi);
3126 struct bnx2x *bp = fp->bp;
3127 int found = 0;
3128
3129 if ((bp->state == BNX2X_STATE_CLOSED) ||
3130 (bp->state == BNX2X_STATE_ERROR) ||
3131 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3132 return LL_FLUSH_FAILED;
3133
3134 if (!bnx2x_fp_lock_poll(fp))
3135 return LL_FLUSH_BUSY;
3136
75b29459 3137 if (bnx2x_has_rx_work(fp))
8f20aa57 3138 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3139
3140 bnx2x_fp_unlock_poll(fp);
3141
3142 return found;
3143}
3144#endif
3145
9f6c9258
DK
3146/* we split the first BD into headers and data BDs
3147 * to ease the pain of our fellow microcode engineers;
3148 * we use one mapping for both BDs
9f6c9258 3149 */
91226790
DK
3150static u16 bnx2x_tx_split(struct bnx2x *bp,
3151 struct bnx2x_fp_txdata *txdata,
3152 struct sw_tx_bd *tx_buf,
3153 struct eth_tx_start_bd **tx_bd, u16 hlen,
3154 u16 bd_prod)
9f6c9258
DK
3155{
3156 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3157 struct eth_tx_bd *d_tx_bd;
3158 dma_addr_t mapping;
3159 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3160
3161 /* first fix first BD */
9f6c9258
DK
3162 h_tx_bd->nbytes = cpu_to_le16(hlen);
3163
91226790
DK
3164 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3165 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3166
3167 /* now get a new data BD
3168 * (after the pbd) and fill it */
3169 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3170 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3171
3172 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3173 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3174
3175 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3176 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3177 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3178
3179 /* this marks the BD as one that has no individual mapping */
3180 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3181
3182 DP(NETIF_MSG_TX_QUEUED,
3183 "TSO split data size is %d (%x:%x)\n",
3184 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3185
3186 /* update tx_bd */
3187 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3188
3189 return bd_prod;
3190}
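/* Editor's note (illustrative): after the split above both BDs share the
 * original DMA mapping. For a hypothetical 200 byte linear part with a
 * 66 byte header the result is
 *
 *	start BD: addr,      nbytes = 66  (headers)
 *	data  BD: addr + 66, nbytes = 134 (payload)
 *
 * with BNX2X_TSO_SPLIT_BD recording that the second BD has no mapping of its
 * own to unmap on completion.
 */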
3191
86564c3f
YM
3192#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3193#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3194static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3195{
86564c3f
YM
3196 __sum16 tsum = (__force __sum16) csum;
3197
9f6c9258 3198 if (fix > 0)
86564c3f
YM
3199 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3200 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3201
3202 else if (fix < 0)
86564c3f
YM
3203 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3204 csum_partial(t_header, -fix, 0)));
9f6c9258 3205
e2593fcd 3206 return bswab16(tsum);
9f6c9258
DK
3207}
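/* Editor's note (illustrative): reading the arithmetic above - for a positive
 * 'fix' the partial checksum of the 'fix' bytes just before t_header is
 * subtracted from the accumulated csum, for a negative 'fix' the first -fix
 * bytes at t_header are added in, and the adjusted sum is folded back to
 * 16 bits and byte-swapped for the BD layout.
 */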
3208
91226790 3209static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3210{
3211 u32 rc;
a848ade4
DK
3212 __u8 prot = 0;
3213 __be16 protocol;
9f6c9258
DK
3214
3215 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3216 return XMIT_PLAIN;
9f6c9258 3217
a848ade4
DK
3218 protocol = vlan_get_protocol(skb);
3219 if (protocol == htons(ETH_P_IPV6)) {
3220 rc = XMIT_CSUM_V6;
3221 prot = ipv6_hdr(skb)->nexthdr;
3222 } else {
3223 rc = XMIT_CSUM_V4;
3224 prot = ip_hdr(skb)->protocol;
3225 }
9f6c9258 3226
a848ade4
DK
3227 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3228 if (inner_ip_hdr(skb)->version == 6) {
3229 rc |= XMIT_CSUM_ENC_V6;
3230 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3231 rc |= XMIT_CSUM_TCP;
9f6c9258 3232 } else {
a848ade4
DK
3233 rc |= XMIT_CSUM_ENC_V4;
3234 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3235 rc |= XMIT_CSUM_TCP;
3236 }
3237 }
a848ade4
DK
3238 if (prot == IPPROTO_TCP)
3239 rc |= XMIT_CSUM_TCP;
9f6c9258 3240
a848ade4 3241 if (skb_is_gso_v6(skb)) {
e768fb29 3242 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
a848ade4
DK
3243 if (rc & XMIT_CSUM_ENC)
3244 rc |= XMIT_GSO_ENC_V6;
3245 } else if (skb_is_gso(skb)) {
e768fb29 3246 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
a848ade4
DK
3247 if (rc & XMIT_CSUM_ENC)
3248 rc |= XMIT_GSO_ENC_V4;
3249 }
9f6c9258
DK
3250
3251 return rc;
3252}
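/* Editor's note (illustrative): for a plain TCPv4 TSO frame the function
 * above returns XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4; a tunnelled frame
 * additionally picks up the _ENC_ variants from the inner-header checks, and
 * anything without CHECKSUM_PARTIAL is simply XMIT_PLAIN.
 */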
3253
3254#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3255/* check if packet requires linearization (packet is too fragmented);
3256 no need to check fragmentation if page size > 8K (there will be no
3257 violation of FW restrictions) */
3258static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3259 u32 xmit_type)
3260{
3261 int to_copy = 0;
3262 int hlen = 0;
3263 int first_bd_sz = 0;
3264
3265 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3266 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3267
3268 if (xmit_type & XMIT_GSO) {
3269 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3270 /* Check if LSO packet needs to be copied:
3271 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3272 int wnd_size = MAX_FETCH_BD - 3;
3273 /* Number of windows to check */
3274 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3275 int wnd_idx = 0;
3276 int frag_idx = 0;
3277 u32 wnd_sum = 0;
3278
3279 /* Headers length */
3280 hlen = (int)(skb_transport_header(skb) - skb->data) +
3281 tcp_hdrlen(skb);
3282
3283 /* Amount of data (w/o headers) on linear part of SKB*/
3284 first_bd_sz = skb_headlen(skb) - hlen;
3285
3286 wnd_sum = first_bd_sz;
3287
3288 /* Calculate the first sum - it's special */
3289 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3290 wnd_sum +=
9e903e08 3291 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3292
3293 /* If there was data on the linear part of the skb - check it */
3294 if (first_bd_sz > 0) {
3295 if (unlikely(wnd_sum < lso_mss)) {
3296 to_copy = 1;
3297 goto exit_lbl;
3298 }
3299
3300 wnd_sum -= first_bd_sz;
3301 }
3302
3303 /* Others are easier: run through the frag list and
3304 check all windows */
3305 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3306 wnd_sum +=
9e903e08 3307 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3308
3309 if (unlikely(wnd_sum < lso_mss)) {
3310 to_copy = 1;
3311 break;
3312 }
3313 wnd_sum -=
9e903e08 3314 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3315 }
3316 } else {
3317 /* in the non-LSO case a too fragmented packet should always
3318 be linearized */
3319 to_copy = 1;
3320 }
3321 }
3322
3323exit_lbl:
3324 if (unlikely(to_copy))
3325 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3326 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3327 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3328 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3329
3330 return to_copy;
3331}
3332#endif
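/* Editor's note (illustrative): the loop above slides a window of
 * (MAX_FETCH_BD - 3) consecutive BDs across the frags of an LSO skb and
 * checks that every window still carries at least one gso_size worth of
 * payload; if any window falls short, the firmware could not fetch a full
 * segment from it and the skb is linearized instead.
 */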
3333
91226790
DK
3334static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3335 u32 xmit_type)
f2e0899f 3336{
a848ade4
DK
3337 struct ipv6hdr *ipv6;
3338
2297a2da
VZ
3339 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3340 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3341 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3342
3343 if (xmit_type & XMIT_GSO_ENC_V6)
3344 ipv6 = inner_ipv6_hdr(skb);
3345 else if (xmit_type & XMIT_GSO_V6)
3346 ipv6 = ipv6_hdr(skb);
3347 else
3348 ipv6 = NULL;
3349
3350 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3351 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3352}
3353
3354/**
e8920674 3355 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3356 *
e8920674
DK
3357 * @skb: packet skb
3358 * @pbd: parse BD
3359 * @xmit_type: xmit flags
f2e0899f 3360 */
91226790
DK
3361static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3362 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3363 struct eth_tx_start_bd *tx_start_bd,
91226790 3364 u32 xmit_type)
f2e0899f
DK
3365{
3366 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3367 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3368 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3369
3370 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3371 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3372 pbd->tcp_pseudo_csum =
86564c3f
YM
3373 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3374 ip_hdr(skb)->daddr,
3375 0, IPPROTO_TCP, 0));
f2e0899f 3376
057cf65e
YM
3377 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3378 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3379 } else {
f2e0899f 3380 pbd->tcp_pseudo_csum =
86564c3f
YM
3381 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3382 &ipv6_hdr(skb)->daddr,
3383 0, IPPROTO_TCP, 0));
057cf65e 3384 }
f2e0899f 3385
86564c3f
YM
3386 pbd->global_data |=
3387 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3388}
f85582f8 3389
a848ade4
DK
3390/**
3391 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3392 *
3393 * @bp: driver handle
3394 * @skb: packet skb
3395 * @parsing_data: data to be updated
3396 * @xmit_type: xmit flags
3397 *
3398 * 57712/578xx related, when skb has encapsulation
3399 */
3400static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3401 u32 *parsing_data, u32 xmit_type)
3402{
3403 *parsing_data |=
3404 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3405 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3406 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3407
3408 if (xmit_type & XMIT_CSUM_TCP) {
3409 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3410 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3411 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3412
3413 return skb_inner_transport_header(skb) +
3414 inner_tcp_hdrlen(skb) - skb->data;
3415 }
3416
3417 /* We support checksum offload for TCP and UDP only.
3418 * No need to pass the UDP header length - it's a constant.
3419 */
3420 return skb_inner_transport_header(skb) +
3421 sizeof(struct udphdr) - skb->data;
3422}
3423
f2e0899f 3424/**
e8920674 3425 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3426 *
e8920674
DK
3427 * @bp: driver handle
3428 * @skb: packet skb
3429 * @parsing_data: data to be updated
3430 * @xmit_type: xmit flags
f2e0899f 3431 *
91226790 3432 * 57712/578xx related
f2e0899f 3433 */
91226790
DK
3434static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3435 u32 *parsing_data, u32 xmit_type)
f2e0899f 3436{
e39aece7 3437 *parsing_data |=
2de67439 3438 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3439 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3440 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3441
e39aece7
VZ
3442 if (xmit_type & XMIT_CSUM_TCP) {
3443 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3444 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3445 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3446
e39aece7 3447 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3448 }
3449 /* We support checksum offload for TCP and UDP only.
3450 * No need to pass the UDP header length - it's a constant.
3451 */
3452 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3453}
3454
a848ade4 3455/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3456static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3457 struct eth_tx_start_bd *tx_start_bd,
3458 u32 xmit_type)
93ef5c02 3459{
93ef5c02
DK
3460 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3461
a848ade4 3462 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3463 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3464
3465 if (!(xmit_type & XMIT_CSUM_TCP))
3466 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3467}
3468
f2e0899f 3469/**
e8920674 3470 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3471 *
e8920674
DK
3472 * @bp: driver handle
3473 * @skb: packet skb
3474 * @pbd: parse BD to be updated
3475 * @xmit_type: xmit flags
f2e0899f 3476 */
91226790
DK
3477static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3478 struct eth_tx_parse_bd_e1x *pbd,
3479 u32 xmit_type)
f2e0899f 3480{
e39aece7 3481 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3482
3483 /* for now NS flag is not used in Linux */
3484 pbd->global_data =
86564c3f
YM
3485 cpu_to_le16(hlen |
3486 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3487 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3488
3489 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3490 skb_network_header(skb)) >> 1;
f2e0899f 3491
e39aece7
VZ
3492 hlen += pbd->ip_hlen_w;
3493
3494 /* We support checksum offload for TCP and UDP only */
3495 if (xmit_type & XMIT_CSUM_TCP)
3496 hlen += tcp_hdrlen(skb) / 2;
3497 else
3498 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3499
3500 pbd->total_hlen_w = cpu_to_le16(hlen);
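 /* hlen was accumulated in 16-bit words; convert it back to bytes for the caller */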
3501 hlen = hlen*2;
3502
3503 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3504 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3505
3506 } else {
3507 s8 fix = SKB_CS_OFF(skb); /* signed! */
3508
3509 DP(NETIF_MSG_TX_QUEUED,
3510 "hlen %d fix %d csum before fix %x\n",
3511 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3512
3513 /* HW bug: fixup the CSUM */
3514 pbd->tcp_pseudo_csum =
3515 bnx2x_csum_fix(skb_transport_header(skb),
3516 SKB_CS(skb), fix);
3517
3518 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3519 pbd->tcp_pseudo_csum);
3520 }
3521
3522 return hlen;
3523}
f85582f8 3524
a848ade4
DK
3525static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3526 struct eth_tx_parse_bd_e2 *pbd_e2,
3527 struct eth_tx_parse_2nd_bd *pbd2,
3528 u16 *global_data,
3529 u32 xmit_type)
3530{
e287a75c 3531 u16 hlen_w = 0;
a848ade4 3532 u8 outerip_off, outerip_len = 0;
e768fb29 3533
e287a75c
DK
3534 /* from outer IP to transport */
3535 hlen_w = (skb_inner_transport_header(skb) -
3536 skb_network_header(skb)) >> 1;
a848ade4
DK
3537
3538 /* transport len */
e768fb29 3539 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3540
e287a75c 3541 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3542
e768fb29
DK
3543 /* outer IP header info */
3544 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3545 struct iphdr *iph = ip_hdr(skb);
c957d09f
YM
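 /* Back the tot_len and frag_off fields out of the outer IP header checksum;
  * FW re-adds the per-segment values (hence the field name
  * fw_ip_csum_wo_len_flags_frag).
  */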
3546 u16 csum = (__force u16)(~iph->check) -
3547 (__force u16)iph->tot_len -
3548 (__force u16)iph->frag_off;
3549
a848ade4 3550 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3551 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3552 } else {
3553 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3554 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3555 }
3556
3557 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3558
3559 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3560
3561 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3562 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3563
3564 pbd_e2->data.tunnel_data.pseudo_csum =
3565 bswab16(~csum_tcpudp_magic(
3566 inner_ip_hdr(skb)->saddr,
3567 inner_ip_hdr(skb)->daddr,
3568 0, IPPROTO_TCP, 0));
3569
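 /* iph->ihl is in 32-bit dwords; outerip_len below is kept in 16-bit words (_W) */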
3570 outerip_len = ip_hdr(skb)->ihl << 1;
3571 } else {
3572 pbd_e2->data.tunnel_data.pseudo_csum =
3573 bswab16(~csum_ipv6_magic(
3574 &inner_ipv6_hdr(skb)->saddr,
3575 &inner_ipv6_hdr(skb)->daddr,
3576 0, IPPROTO_TCP, 0));
3577 }
3578
3579 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3580
3581 *global_data |=
3582 outerip_off |
3583 (!!(xmit_type & XMIT_CSUM_V6) <<
3584 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3585 (outerip_len <<
3586 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3587 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3588 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3589
3590 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3591 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3592 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3593 }
a848ade4
DK
3594}
3595
9f6c9258
DK
3596/* called with netif_tx_lock
3597 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3598 * netif_wake_queue()
3599 */
3600netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3601{
3602 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3603
9f6c9258 3604 struct netdev_queue *txq;
6383c0b3 3605 struct bnx2x_fp_txdata *txdata;
9f6c9258 3606 struct sw_tx_bd *tx_buf;
619c5cb6 3607 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3608 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3609 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3610 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3611 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3612 u32 pbd_e2_parsing_data = 0;
9f6c9258 3613 u16 pkt_prod, bd_prod;
65565884 3614 int nbd, txq_index;
9f6c9258
DK
3615 dma_addr_t mapping;
3616 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3617 int i;
3618 u8 hlen = 0;
3619 __le16 pkt_size = 0;
3620 struct ethhdr *eth;
3621 u8 mac_type = UNICAST_ADDRESS;
3622
3623#ifdef BNX2X_STOP_ON_ERROR
3624 if (unlikely(bp->panic))
3625 return NETDEV_TX_BUSY;
3626#endif
3627
6383c0b3
AE
3628 txq_index = skb_get_queue_mapping(skb);
3629 txq = netdev_get_tx_queue(dev, txq_index);
3630
55c11941 3631 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3632
65565884 3633 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3634
3635 /* enable this debug print to view the transmission queue being used
51c1a580 3636 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3637 txq_index, fp_index, txdata_index); */
9f6c9258 3638
16a5fd92 3639 /* enable this debug print to view the transmission details
51c1a580
MS
3640 DP(NETIF_MSG_TX_QUEUED,
3641 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3642 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3643
6383c0b3 3644 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3645 skb_shinfo(skb)->nr_frags +
3646 BDS_PER_TX_PKT +
3647 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3648 /* Handle special storage cases separately */
c96bdc0c
DK
3649 if (txdata->tx_ring_size == 0) {
3650 struct bnx2x_eth_q_stats *q_stats =
3651 bnx2x_fp_qstats(bp, txdata->parent_fp);
3652 q_stats->driver_filtered_tx_pkt++;
3653 dev_kfree_skb(skb);
3654 return NETDEV_TX_OK;
3655 }
2de67439
YM
3656 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3657 netif_tx_stop_queue(txq);
c96bdc0c 3658 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3659
9f6c9258
DK
3660 return NETDEV_TX_BUSY;
3661 }
3662
51c1a580 3663 DP(NETIF_MSG_TX_QUEUED,
04c46736 3664 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3665 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3666 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3667 skb->len);
9f6c9258
DK
3668
3669 eth = (struct ethhdr *)skb->data;
3670
3671 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3672 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3673 if (is_broadcast_ether_addr(eth->h_dest))
3674 mac_type = BROADCAST_ADDRESS;
3675 else
3676 mac_type = MULTICAST_ADDRESS;
3677 }
3678
91226790 3679#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3680 /* First, check if we need to linearize the skb (due to FW
3681 restrictions). No need to check fragmentation if page size > 8K
3682 (there will be no violation of FW restrictions) */
3683 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3684 /* Statistics of linearization */
3685 bp->lin_cnt++;
3686 if (skb_linearize(skb) != 0) {
51c1a580
MS
3687 DP(NETIF_MSG_TX_QUEUED,
3688 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3689 dev_kfree_skb_any(skb);
3690 return NETDEV_TX_OK;
3691 }
3692 }
3693#endif
619c5cb6
VZ
3694 /* Map skb linear data for DMA */
3695 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3696 skb_headlen(skb), DMA_TO_DEVICE);
3697 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3698 DP(NETIF_MSG_TX_QUEUED,
3699 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3700 dev_kfree_skb_any(skb);
3701 return NETDEV_TX_OK;
3702 }
9f6c9258
DK
3703 /*
3704 Please read carefully. First we use one BD which we mark as start,
3705 then we have a parsing info BD (used for TSO or xsum),
3706 and only then we have the rest of the TSO BDs.
3707 (don't forget to mark the last one as last,
3708 and to unmap only AFTER you write to the BD ...)
3709 And above all, all pbd sizes are in words - NOT DWORDS!
3710 */
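 /* Illustrative BD count (not from the original comment): a non-TSO packet
  * with two page frags uses a start BD, a parsing BD and two data BDs
  * (nbd == 4), before the next-page BD accounting applied further down.
  */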
3711
619c5cb6
VZ
3712 /* get current pkt produced now - advance it just before sending packet
3713 * since mapping of pages may fail and cause packet to be dropped
3714 */
6383c0b3
AE
3715 pkt_prod = txdata->tx_pkt_prod;
3716 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3717
619c5cb6
VZ
3718 /* get a tx_buf and first BD
3719 * tx_start_bd may be changed during SPLIT,
3720 * but first_bd will always stay first
3721 */
6383c0b3
AE
3722 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3723 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3724 first_bd = tx_start_bd;
9f6c9258
DK
3725
3726 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3727
91226790
DK
3728 /* header nbd: indirectly zero other flags! */
3729 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3730
3731 /* remember the first BD of the packet */
6383c0b3 3732 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3733 tx_buf->skb = skb;
3734 tx_buf->flags = 0;
3735
3736 DP(NETIF_MSG_TX_QUEUED,
3737 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3738 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3739
eab6d18d 3740 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3741 tx_start_bd->vlan_or_ethertype =
3742 cpu_to_le16(vlan_tx_tag_get(skb));
3743 tx_start_bd->bd_flags.as_bitfield |=
3744 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3745 } else {
3746 /* when transmitting in a vf, start bd must hold the ethertype
3747 * for fw to enforce it
3748 */
91226790 3749 if (IS_VF(bp))
dc1ba591
AE
3750 tx_start_bd->vlan_or_ethertype =
3751 cpu_to_le16(ntohs(eth->h_proto));
91226790 3752 else
dc1ba591
AE
3753 /* used by FW for packet accounting */
3754 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3755 }
9f6c9258 3756
91226790
DK
3757 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3758
9f6c9258
DK
3759 /* turn on parsing and get a BD */
3760 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3761
93ef5c02
DK
3762 if (xmit_type & XMIT_CSUM)
3763 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3764
619c5cb6 3765 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3766 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3767 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3768
3769 if (xmit_type & XMIT_CSUM_ENC) {
3770 u16 global_data = 0;
3771
3772 /* Set PBD in enc checksum offload case */
3773 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3774 &pbd_e2_parsing_data,
3775 xmit_type);
3776
3777 /* turn on 2nd parsing and get a BD */
3778 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3779
3780 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3781
3782 memset(pbd2, 0, sizeof(*pbd2));
3783
3784 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3785 (skb_inner_network_header(skb) -
3786 skb->data) >> 1;
3787
3788 if (xmit_type & XMIT_GSO_ENC)
3789 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3790 &global_data,
3791 xmit_type);
3792
3793 pbd2->global_data = cpu_to_le16(global_data);
3794
3795 /* add additional parse BD indication to start BD */
3796 SET_FLAG(tx_start_bd->general_data,
3797 ETH_TX_START_BD_PARSE_NBDS, 1);
3798 /* set encapsulation flag in start BD */
3799 SET_FLAG(tx_start_bd->general_data,
3800 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3801 nbd++;
3802 } else if (xmit_type & XMIT_CSUM) {
91226790 3803 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3804 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3805 &pbd_e2_parsing_data,
3806 xmit_type);
a848ade4 3807 }
dc1ba591 3808
91226790
DK
3809 /* Add the MACs to the parsing BD if this is a VF */
3810 if (IS_VF(bp)) {
3811 /* override GRE parameters in BD */
3812 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3813 &pbd_e2->data.mac_addr.src_mid,
3814 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3815 eth->h_source);
91226790
DK
3816
3817 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3818 &pbd_e2->data.mac_addr.dst_mid,
3819 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3820 eth->h_dest);
3821 }
96bed4b9
YM
3822
3823 SET_FLAG(pbd_e2_parsing_data,
3824 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3825 } else {
96bed4b9 3826 u16 global_data = 0;
6383c0b3 3827 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3828 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3829 /* Set PBD in checksum offload case */
3830 if (xmit_type & XMIT_CSUM)
3831 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3832
96bed4b9
YM
3833 SET_FLAG(global_data,
3834 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3835 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3836 }
3837
f85582f8 3838 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3839 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3840 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3841 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3842 pkt_size = tx_start_bd->nbytes;
3843
51c1a580 3844 DP(NETIF_MSG_TX_QUEUED,
91226790 3845 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3846 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3847 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3848 tx_start_bd->bd_flags.as_bitfield,
3849 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3850
3851 if (xmit_type & XMIT_GSO) {
3852
3853 DP(NETIF_MSG_TX_QUEUED,
3854 "TSO packet len %d hlen %d total len %d tso size %d\n",
3855 skb->len, hlen, skb_headlen(skb),
3856 skb_shinfo(skb)->gso_size);
3857
3858 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3859
91226790
DK
3860 if (unlikely(skb_headlen(skb) > hlen)) {
3861 nbd++;
6383c0b3
AE
3862 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3863 &tx_start_bd, hlen,
91226790
DK
3864 bd_prod);
3865 }
619c5cb6 3866 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3867 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3868 xmit_type);
f2e0899f 3869 else
44dbc78e 3870 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
9f6c9258 3871 }
2297a2da
VZ
3872
3873 /* Set the PBD's parsing_data field if not zero
3874 * (for the chips newer than 57711).
3875 */
3876 if (pbd_e2_parsing_data)
3877 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3878
9f6c9258
DK
3879 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3880
f85582f8 3881 /* Handle fragmented skb */
9f6c9258
DK
3882 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3883 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3884
9e903e08
ED
3885 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3886 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3887 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3888 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3889
51c1a580
MS
3890 DP(NETIF_MSG_TX_QUEUED,
3891 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3892
3893 /* we need to unmap all buffers already mapped
3894 * for this SKB;
3895 * first_bd->nbd needs to be properly updated
3896 * before the call to bnx2x_free_tx_pkt
3897 */
3898 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3899 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3900 TX_BD(txdata->tx_pkt_prod),
3901 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3902 return NETDEV_TX_OK;
3903 }
3904
9f6c9258 3905 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3906 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3907 if (total_pkt_bd == NULL)
6383c0b3 3908 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3909
9f6c9258
DK
3910 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3911 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3912 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3913 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3914 nbd++;
9f6c9258
DK
3915
3916 DP(NETIF_MSG_TX_QUEUED,
3917 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3918 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3919 le16_to_cpu(tx_data_bd->nbytes));
3920 }
3921
3922 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3923
619c5cb6
VZ
3924 /* update with actual num BDs */
3925 first_bd->nbd = cpu_to_le16(nbd);
3926
9f6c9258
DK
3927 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3928
3929 /* now send a tx doorbell, counting the next BD
3930 * if the packet contains or ends with it
3931 */
3932 if (TX_BD_POFF(bd_prod) < nbd)
3933 nbd++;
3934
619c5cb6
VZ
3935 /* total_pkt_bytes should be set on the first data BD if
3936 * it's not an LSO packet and there is more than one
3937 * data BD. In this case pkt_size is limited by an MTU value.
3938 * However we prefer to set it for an LSO packet (while we don't
3939 * have to) in order to save some CPU cycles in the non-LSO
3940 * case, which we care much more about.
3941 */
9f6c9258
DK
3942 if (total_pkt_bd != NULL)
3943 total_pkt_bd->total_pkt_bytes = pkt_size;
3944
523224a3 3945 if (pbd_e1x)
9f6c9258 3946 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3947 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3948 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3949 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3950 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3951 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3952 if (pbd_e2)
3953 DP(NETIF_MSG_TX_QUEUED,
3954 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
3955 pbd_e2,
3956 pbd_e2->data.mac_addr.dst_hi,
3957 pbd_e2->data.mac_addr.dst_mid,
3958 pbd_e2->data.mac_addr.dst_lo,
3959 pbd_e2->data.mac_addr.src_hi,
3960 pbd_e2->data.mac_addr.src_mid,
3961 pbd_e2->data.mac_addr.src_lo,
f2e0899f 3962 pbd_e2->parsing_data);
9f6c9258
DK
3963 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3964
2df1a70a
TH
3965 netdev_tx_sent_queue(txq, skb->len);
3966
8373c57d
WB
3967 skb_tx_timestamp(skb);
3968
6383c0b3 3969 txdata->tx_pkt_prod++;
9f6c9258
DK
3970 /*
3971 * Make sure that the BD data is updated before updating the producer
3972 * since FW might read the BD right after the producer is updated.
3973 * This is only applicable for weak-ordered memory model archs such
3974 * as IA-64. The following barrier is also mandatory since FW will
3975 * assume packets must have BDs.
3976 */
3977 wmb();
3978
6383c0b3 3979 txdata->tx_db.data.prod += nbd;
9f6c9258 3980 barrier();
f85582f8 3981
6383c0b3 3982 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3983
3984 mmiowb();
3985
6383c0b3 3986 txdata->tx_bd_prod += nbd;
9f6c9258 3987
7df2dc6b 3988 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3989 netif_tx_stop_queue(txq);
3990
3991 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3992 * ordering of set_bit() in netif_tx_stop_queue() and read of
3993 * fp->bd_tx_cons */
3994 smp_mb();
3995
15192a8c 3996 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 3997 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
3998 netif_tx_wake_queue(txq);
3999 }
6383c0b3 4000 txdata->tx_pkt++;
9f6c9258
DK
4001
4002 return NETDEV_TX_OK;
4003}
f85582f8 4004
6383c0b3
AE
4005/**
4006 * bnx2x_setup_tc - routine to configure net_device for multi tc
4007 *
4008 * @dev: net device to configure
4009 * @num_tc: number of traffic classes to enable
4010 *
4011 * callback connected to the ndo_setup_tc function pointer
4012 */
4013int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4014{
4015 int cos, prio, count, offset;
4016 struct bnx2x *bp = netdev_priv(dev);
4017
4018 /* setup tc must be called under rtnl lock */
4019 ASSERT_RTNL();
4020
16a5fd92 4021 /* no traffic classes requested. Aborting */
6383c0b3
AE
4022 if (!num_tc) {
4023 netdev_reset_tc(dev);
4024 return 0;
4025 }
4026
4027 /* requested to support too many traffic classes */
4028 if (num_tc > bp->max_cos) {
6bf07b8e 4029 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4030 num_tc, bp->max_cos);
6383c0b3
AE
4031 return -EINVAL;
4032 }
4033
4034 /* declare amount of supported traffic classes */
4035 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4036 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4037 return -EINVAL;
4038 }
4039
4040 /* configure priority to traffic class mapping */
4041 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4042 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4043 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4044 "mapping priority %d to tc %d\n",
6383c0b3
AE
4045 prio, bp->prio_to_cos[prio]);
4046 }
4047
16a5fd92 4048 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4049 This can be used for ets or pfc, and save the effort of setting
4050 up a multi-class queue disc or negotiating DCBX with a switch
4051 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4052 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4053 for (prio = 1; prio < 16; prio++) {
4054 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4055 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4056 } */
4057
4058 /* configure traffic class to transmission queue mapping */
4059 for (cos = 0; cos < bp->max_cos; cos++) {
4060 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4061 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
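 /* e.g. with 4 ethernet queues and max_cos == 3 (illustrative numbers),
  * tc0/tc1/tc2 map to queue ranges [0..3], [4..7] and [8..11]
  */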
6383c0b3 4062 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4063 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4064 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4065 cos, offset, count);
4066 }
4067
4068 return 0;
4069}
4070
9f6c9258
DK
4071/* called with rtnl_lock */
4072int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4073{
4074 struct sockaddr *addr = p;
4075 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4076 int rc = 0;
9f6c9258 4077
51c1a580
MS
4078 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4079 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4080 return -EINVAL;
51c1a580 4081 }
614c76df 4082
a3348722
BW
4083 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4084 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4085 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4086 return -EINVAL;
51c1a580 4087 }
9f6c9258 4088
619c5cb6
VZ
4089 if (netif_running(dev)) {
4090 rc = bnx2x_set_eth_mac(bp, false);
4091 if (rc)
4092 return rc;
4093 }
4094
9f6c9258 4095 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4096
523224a3 4097 if (netif_running(dev))
619c5cb6 4098 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4099
619c5cb6 4100 return rc;
9f6c9258
DK
4101}
4102
b3b83c3f
DK
4103static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4104{
4105 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4106 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4107 u8 cos;
b3b83c3f
DK
4108
4109 /* Common */
55c11941 4110
b3b83c3f
DK
4111 if (IS_FCOE_IDX(fp_index)) {
4112 memset(sb, 0, sizeof(union host_hc_status_block));
4113 fp->status_blk_mapping = 0;
b3b83c3f 4114 } else {
b3b83c3f 4115 /* status blocks */
619c5cb6 4116 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4117 BNX2X_PCI_FREE(sb->e2_sb,
4118 bnx2x_fp(bp, fp_index,
4119 status_blk_mapping),
4120 sizeof(struct host_hc_status_block_e2));
4121 else
4122 BNX2X_PCI_FREE(sb->e1x_sb,
4123 bnx2x_fp(bp, fp_index,
4124 status_blk_mapping),
4125 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4126 }
55c11941 4127
b3b83c3f
DK
4128 /* Rx */
4129 if (!skip_rx_queue(bp, fp_index)) {
4130 bnx2x_free_rx_bds(fp);
4131
4132 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4133 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4134 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4135 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4136 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4137
4138 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4139 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4140 sizeof(struct eth_fast_path_rx_cqe) *
4141 NUM_RCQ_BD);
4142
4143 /* SGE ring */
4144 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4145 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4146 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4147 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4148 }
4149
4150 /* Tx */
4151 if (!skip_tx_queue(bp, fp_index)) {
4152 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4153 for_each_cos_in_tx_queue(fp, cos) {
65565884 4154 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4155
51c1a580 4156 DP(NETIF_MSG_IFDOWN,
94f05b0f 4157 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4158 fp_index, cos, txdata->cid);
4159
4160 BNX2X_FREE(txdata->tx_buf_ring);
4161 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4162 txdata->tx_desc_mapping,
4163 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4164 }
b3b83c3f
DK
4165 }
4166 /* end of fastpath */
4167}
4168
55c11941
MS
4169void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4170{
4171 int i;
4172 for_each_cnic_queue(bp, i)
4173 bnx2x_free_fp_mem_at(bp, i);
4174}
4175
b3b83c3f
DK
4176void bnx2x_free_fp_mem(struct bnx2x *bp)
4177{
4178 int i;
55c11941 4179 for_each_eth_queue(bp, i)
b3b83c3f
DK
4180 bnx2x_free_fp_mem_at(bp, i);
4181}
4182
1191cb83 4183static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4184{
4185 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4186 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4187 bnx2x_fp(bp, index, sb_index_values) =
4188 (__le16 *)status_blk.e2_sb->sb.index_values;
4189 bnx2x_fp(bp, index, sb_running_index) =
4190 (__le16 *)status_blk.e2_sb->sb.running_index;
4191 } else {
4192 bnx2x_fp(bp, index, sb_index_values) =
4193 (__le16 *)status_blk.e1x_sb->sb.index_values;
4194 bnx2x_fp(bp, index, sb_running_index) =
4195 (__le16 *)status_blk.e1x_sb->sb.running_index;
4196 }
4197}
4198
1191cb83
ED
4199/* Returns the number of actually allocated BDs */
4200static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4201 int rx_ring_size)
4202{
4203 struct bnx2x *bp = fp->bp;
4204 u16 ring_prod, cqe_ring_prod;
4205 int i, failure_cnt = 0;
4206
4207 fp->rx_comp_cons = 0;
4208 cqe_ring_prod = ring_prod = 0;
4209
4210 /* This routine is called only during fp init so
4211 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4212 */
4213 for (i = 0; i < rx_ring_size; i++) {
4214 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4215 failure_cnt++;
4216 continue;
4217 }
4218 ring_prod = NEXT_RX_IDX(ring_prod);
4219 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4220 WARN_ON(ring_prod <= (i - failure_cnt));
4221 }
4222
4223 if (failure_cnt)
4224 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4225 i - failure_cnt, fp->index);
4226
4227 fp->rx_bd_prod = ring_prod;
4228 /* Limit the CQE producer by the CQE ring size */
4229 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4230 cqe_ring_prod);
4231 fp->rx_pkt = fp->rx_calls = 0;
4232
15192a8c 4233 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4234
4235 return i - failure_cnt;
4236}
4237
4238static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4239{
4240 int i;
4241
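 /* The last CQE of every RCQ page is a link element pointing at the next
  * page, with the final page wrapping back to the first one.
  */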
4242 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4243 struct eth_rx_cqe_next_page *nextpg;
4244
4245 nextpg = (struct eth_rx_cqe_next_page *)
4246 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4247 nextpg->addr_hi =
4248 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4249 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4250 nextpg->addr_lo =
4251 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4252 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4253 }
4254}
4255
b3b83c3f
DK
4256static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4257{
4258 union host_hc_status_block *sb;
4259 struct bnx2x_fastpath *fp = &bp->fp[index];
4260 int ring_size = 0;
6383c0b3 4261 u8 cos;
c2188952 4262 int rx_ring_size = 0;
b3b83c3f 4263
a3348722
BW
4264 if (!bp->rx_ring_size &&
4265 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4266 rx_ring_size = MIN_RX_SIZE_NONTPA;
4267 bp->rx_ring_size = rx_ring_size;
55c11941 4268 } else if (!bp->rx_ring_size) {
c2188952
VZ
4269 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4270
065f8b92
YM
4271 if (CHIP_IS_E3(bp)) {
4272 u32 cfg = SHMEM_RD(bp,
4273 dev_info.port_hw_config[BP_PORT(bp)].
4274 default_cfg);
4275
4276 /* Decrease ring size for 1G functions */
4277 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4278 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4279 rx_ring_size /= 10;
4280 }
d760fc37 4281
c2188952
VZ
4282 /* allocate at least number of buffers required by FW */
4283 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4284 MIN_RX_SIZE_TPA, rx_ring_size);
4285
4286 bp->rx_ring_size = rx_ring_size;
614c76df 4287 } else /* if rx_ring_size specified - use it */
c2188952 4288 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4289
04c46736
YM
4290 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4291
b3b83c3f
DK
4292 /* Common */
4293 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4294
b3b83c3f 4295 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4296 /* status blocks */
619c5cb6 4297 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4298 BNX2X_PCI_ALLOC(sb->e2_sb,
4299 &bnx2x_fp(bp, index, status_blk_mapping),
4300 sizeof(struct host_hc_status_block_e2));
4301 else
4302 BNX2X_PCI_ALLOC(sb->e1x_sb,
4303 &bnx2x_fp(bp, index, status_blk_mapping),
4304 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4305 }
8eef2af1
DK
4306
4307 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4308 * set shortcuts for it.
4309 */
4310 if (!IS_FCOE_IDX(index))
4311 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4312
4313 /* Tx */
4314 if (!skip_tx_queue(bp, index)) {
4315 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4316 for_each_cos_in_tx_queue(fp, cos) {
65565884 4317 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4318
51c1a580
MS
4319 DP(NETIF_MSG_IFUP,
4320 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4321 index, cos);
4322
4323 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4324 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4325 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4326 &txdata->tx_desc_mapping,
b3b83c3f 4327 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4328 }
b3b83c3f
DK
4329 }
4330
4331 /* Rx */
4332 if (!skip_rx_queue(bp, index)) {
4333 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4334 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4335 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4336 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4337 &bnx2x_fp(bp, index, rx_desc_mapping),
4338 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4339
75b29459
DK
4340 /* Seed all CQEs by 1s */
4341 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4342 &bnx2x_fp(bp, index, rx_comp_mapping),
4343 sizeof(struct eth_fast_path_rx_cqe) *
4344 NUM_RCQ_BD);
b3b83c3f
DK
4345
4346 /* SGE ring */
4347 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4348 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4349 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4350 &bnx2x_fp(bp, index, rx_sge_mapping),
4351 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4352 /* RX BD ring */
4353 bnx2x_set_next_page_rx_bd(fp);
4354
4355 /* CQ ring */
4356 bnx2x_set_next_page_rx_cq(fp);
4357
4358 /* BDs */
4359 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4360 if (ring_size < rx_ring_size)
4361 goto alloc_mem_err;
4362 }
4363
4364 return 0;
4365
4366/* handles low memory cases */
4367alloc_mem_err:
4368 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4369 index, ring_size);
4370 /* FW will drop all packets if the queue is not big enough;
4371 * in that case we disable the queue.
6383c0b3 4372 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4373 */
4374 if (ring_size < (fp->disable_tpa ?
eb722d7a 4375 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4376 /* release memory allocated for this queue */
4377 bnx2x_free_fp_mem_at(bp, index);
4378 return -ENOMEM;
4379 }
4380 return 0;
4381}
4382
55c11941
MS
4383int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4384{
4385 if (!NO_FCOE(bp))
4386 /* FCoE */
4387 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4388 /* we will fail the load process instead of marking
4389 * NO_FCOE_FLAG
4390 */
4391 return -ENOMEM;
4392
4393 return 0;
4394}
4395
b3b83c3f
DK
4396int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4397{
4398 int i;
4399
55c11941
MS
4400 /* 1. Allocate FP for leading - fatal if error
4401 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4402 */
4403
4404 /* leading */
4405 if (bnx2x_alloc_fp_mem_at(bp, 0))
4406 return -ENOMEM;
6383c0b3 4407
b3b83c3f
DK
4408 /* RSS */
4409 for_each_nondefault_eth_queue(bp, i)
4410 if (bnx2x_alloc_fp_mem_at(bp, i))
4411 break;
4412
4413 /* handle memory failures */
4414 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4415 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4416
4417 WARN_ON(delta < 0);
4864a16a 4418 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4419 if (CNIC_SUPPORT(bp))
4420 /* move non eth FPs next to last eth FP
4421 * must be done in that order
4422 * FCOE_IDX < FWD_IDX < OOO_IDX
4423 */
b3b83c3f 4424
55c11941
MS
4425 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4426 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4427 bp->num_ethernet_queues -= delta;
4428 bp->num_queues = bp->num_ethernet_queues +
4429 bp->num_cnic_queues;
b3b83c3f
DK
4430 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4431 bp->num_queues + delta, bp->num_queues);
4432 }
4433
4434 return 0;
4435}
d6214d7a 4436
523224a3
DK
4437void bnx2x_free_mem_bp(struct bnx2x *bp)
4438{
c3146eb6
DK
4439 int i;
4440
4441 for (i = 0; i < bp->fp_array_size; i++)
4442 kfree(bp->fp[i].tpa_info);
523224a3 4443 kfree(bp->fp);
15192a8c
BW
4444 kfree(bp->sp_objs);
4445 kfree(bp->fp_stats);
65565884 4446 kfree(bp->bnx2x_txq);
523224a3
DK
4447 kfree(bp->msix_table);
4448 kfree(bp->ilt);
4449}
4450
0329aba1 4451int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4452{
4453 struct bnx2x_fastpath *fp;
4454 struct msix_entry *tbl;
4455 struct bnx2x_ilt *ilt;
6383c0b3 4456 int msix_table_size = 0;
55c11941 4457 int fp_array_size, txq_array_size;
15192a8c 4458 int i;
6383c0b3
AE
4459
4460 /*
4461 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4462 * path IGU SBs plus default SB (for PF only).
6383c0b3 4463 */
1ab4434c
AE
4464 msix_table_size = bp->igu_sb_cnt;
4465 if (IS_PF(bp))
4466 msix_table_size++;
4467 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4468
6383c0b3 4469 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4470 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4471 bp->fp_array_size = fp_array_size;
4472 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4473
c3146eb6 4474 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4475 if (!fp)
4476 goto alloc_err;
c3146eb6 4477 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4478 fp[i].tpa_info =
4479 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4480 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4481 if (!(fp[i].tpa_info))
4482 goto alloc_err;
4483 }
4484
523224a3
DK
4485 bp->fp = fp;
4486
15192a8c 4487 /* allocate sp objs */
c3146eb6 4488 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4489 GFP_KERNEL);
4490 if (!bp->sp_objs)
4491 goto alloc_err;
4492
4493 /* allocate fp_stats */
c3146eb6 4494 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4495 GFP_KERNEL);
4496 if (!bp->fp_stats)
4497 goto alloc_err;
4498
65565884 4499 /* Allocate memory for the transmission queues array */
55c11941
MS
4500 txq_array_size =
4501 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4502 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4503
4504 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4505 GFP_KERNEL);
65565884
MS
4506 if (!bp->bnx2x_txq)
4507 goto alloc_err;
4508
523224a3 4509 /* msix table */
01e23742 4510 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4511 if (!tbl)
4512 goto alloc_err;
4513 bp->msix_table = tbl;
4514
4515 /* ilt */
4516 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4517 if (!ilt)
4518 goto alloc_err;
4519 bp->ilt = ilt;
4520
4521 return 0;
4522alloc_err:
4523 bnx2x_free_mem_bp(bp);
4524 return -ENOMEM;
523224a3
DK
4525}
4526
a9fccec7 4527int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4528{
4529 struct bnx2x *bp = netdev_priv(dev);
4530
4531 if (unlikely(!netif_running(dev)))
4532 return 0;
4533
5d07d868 4534 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4535 return bnx2x_nic_load(bp, LOAD_NORMAL);
4536}
4537
1ac9e428
YR
4538int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4539{
4540 u32 sel_phy_idx = 0;
4541 if (bp->link_params.num_phys <= 1)
4542 return INT_PHY;
4543
4544 if (bp->link_vars.link_up) {
4545 sel_phy_idx = EXT_PHY1;
4546 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4547 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4548 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4549 sel_phy_idx = EXT_PHY2;
4550 } else {
4551
4552 switch (bnx2x_phy_selection(&bp->link_params)) {
4553 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4554 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4555 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4556 sel_phy_idx = EXT_PHY1;
4557 break;
4558 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4559 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4560 sel_phy_idx = EXT_PHY2;
4561 break;
4562 }
4563 }
4564
4565 return sel_phy_idx;
1ac9e428
YR
4566}
4567int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4568{
4569 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4570 /*
2de67439 4571 * The selected active PHY is always the one after swapping (in case PHY
1ac9e428
YR
4572 * swapping is enabled). So when swapping is enabled, we need to reverse
4573 * the configuration
4574 */
4575
4576 if (bp->link_params.multi_phy_config &
4577 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4578 if (sel_phy_idx == EXT_PHY1)
4579 sel_phy_idx = EXT_PHY2;
4580 else if (sel_phy_idx == EXT_PHY2)
4581 sel_phy_idx = EXT_PHY1;
4582 }
4583 return LINK_CONFIG_IDX(sel_phy_idx);
4584}
4585
55c11941 4586#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4587int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4588{
4589 struct bnx2x *bp = netdev_priv(dev);
4590 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4591
4592 switch (type) {
4593 case NETDEV_FCOE_WWNN:
4594 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4595 cp->fcoe_wwn_node_name_lo);
4596 break;
4597 case NETDEV_FCOE_WWPN:
4598 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4599 cp->fcoe_wwn_port_name_lo);
4600 break;
4601 default:
51c1a580 4602 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4603 return -EINVAL;
4604 }
4605
4606 return 0;
4607}
4608#endif
4609
9f6c9258
DK
4610/* called with rtnl_lock */
4611int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4612{
4613 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4614
4615 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4616 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4617 return -EAGAIN;
4618 }
4619
4620 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4621 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4622 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4623 return -EINVAL;
51c1a580 4624 }
9f6c9258
DK
4625
4626 /* This does not race with packet allocation
4627 * because the actual alloc size is
4628 * only updated as part of load
4629 */
4630 dev->mtu = new_mtu;
4631
66371c44
MM
4632 return bnx2x_reload_if_running(dev);
4633}
4634
c8f44aff 4635netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4636 netdev_features_t features)
66371c44
MM
4637{
4638 struct bnx2x *bp = netdev_priv(dev);
4639
4640 /* TPA requires Rx CSUM offloading */
621b4d66 4641 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4642 features &= ~NETIF_F_LRO;
621b4d66
DK
4643 features &= ~NETIF_F_GRO;
4644 }
66371c44
MM
4645
4646 return features;
4647}
4648
c8f44aff 4649int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4650{
4651 struct bnx2x *bp = netdev_priv(dev);
4652 u32 flags = bp->flags;
8802f579 4653 u32 changes;
538dd2e3 4654 bool bnx2x_reload = false;
66371c44
MM
4655
4656 if (features & NETIF_F_LRO)
4657 flags |= TPA_ENABLE_FLAG;
4658 else
4659 flags &= ~TPA_ENABLE_FLAG;
4660
621b4d66
DK
4661 if (features & NETIF_F_GRO)
4662 flags |= GRO_ENABLE_FLAG;
4663 else
4664 flags &= ~GRO_ENABLE_FLAG;
4665
538dd2e3
MB
4666 if (features & NETIF_F_LOOPBACK) {
4667 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4668 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4669 bnx2x_reload = true;
4670 }
4671 } else {
4672 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4673 bp->link_params.loopback_mode = LOOPBACK_NONE;
4674 bnx2x_reload = true;
4675 }
4676 }
4677
8802f579
ED
4678 changes = flags ^ bp->flags;
4679
16a5fd92 4680 /* if GRO is changed while LRO is enabled, don't force a reload */
8802f579
ED
4681 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4682 changes &= ~GRO_ENABLE_FLAG;
4683
4684 if (changes)
538dd2e3 4685 bnx2x_reload = true;
8802f579
ED
4686
4687 bp->flags = flags;
66371c44 4688
538dd2e3 4689 if (bnx2x_reload) {
66371c44
MM
4690 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4691 return bnx2x_reload_if_running(dev);
4692 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4693 }
4694
66371c44 4695 return 0;
9f6c9258
DK
4696}
4697
4698void bnx2x_tx_timeout(struct net_device *dev)
4699{
4700 struct bnx2x *bp = netdev_priv(dev);
4701
4702#ifdef BNX2X_STOP_ON_ERROR
4703 if (!bp->panic)
4704 bnx2x_panic();
4705#endif
7be08a72
AE
4706
4707 smp_mb__before_clear_bit();
4708 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4709 smp_mb__after_clear_bit();
4710
9f6c9258 4711 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4712 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4713}
4714
9f6c9258
DK
4715int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4716{
4717 struct net_device *dev = pci_get_drvdata(pdev);
4718 struct bnx2x *bp;
4719
4720 if (!dev) {
4721 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4722 return -ENODEV;
4723 }
4724 bp = netdev_priv(dev);
4725
4726 rtnl_lock();
4727
4728 pci_save_state(pdev);
4729
4730 if (!netif_running(dev)) {
4731 rtnl_unlock();
4732 return 0;
4733 }
4734
4735 netif_device_detach(dev);
4736
5d07d868 4737 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4738
4739 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4740
4741 rtnl_unlock();
4742
4743 return 0;
4744}
4745
4746int bnx2x_resume(struct pci_dev *pdev)
4747{
4748 struct net_device *dev = pci_get_drvdata(pdev);
4749 struct bnx2x *bp;
4750 int rc;
4751
4752 if (!dev) {
4753 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4754 return -ENODEV;
4755 }
4756 bp = netdev_priv(dev);
4757
4758 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4759 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4760 return -EAGAIN;
4761 }
4762
4763 rtnl_lock();
4764
4765 pci_restore_state(pdev);
4766
4767 if (!netif_running(dev)) {
4768 rtnl_unlock();
4769 return 0;
4770 }
4771
4772 bnx2x_set_power_state(bp, PCI_D0);
4773 netif_device_attach(dev);
4774
4775 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4776
4777 rtnl_unlock();
4778
4779 return rc;
4780}
619c5cb6 4781
619c5cb6
VZ
4782void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4783 u32 cid)
4784{
4785 /* ustorm cxt validation */
4786 cxt->ustorm_ag_context.cdu_usage =
4787 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4788 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4789 /* xcontext validation */
4790 cxt->xstorm_ag_context.cdu_reserved =
4791 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4792 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4793}
4794
1191cb83
ED
4795static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4796 u8 fw_sb_id, u8 sb_index,
4797 u8 ticks)
619c5cb6 4798{
619c5cb6
VZ
4799 u32 addr = BAR_CSTRORM_INTMEM +
4800 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4801 REG_WR8(bp, addr, ticks);
51c1a580
MS
4802 DP(NETIF_MSG_IFUP,
4803 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4804 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4805}
4806
1191cb83
ED
4807static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4808 u16 fw_sb_id, u8 sb_index,
4809 u8 disable)
619c5cb6
VZ
4810{
4811 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4812 u32 addr = BAR_CSTRORM_INTMEM +
4813 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4814 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4815 /* clear and set */
4816 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4817 flags |= enable_flag;
0c14e5ce 4818 REG_WR8(bp, addr, flags);
51c1a580
MS
4819 DP(NETIF_MSG_IFUP,
4820 "port %x fw_sb_id %d sb_index %d disable %d\n",
4821 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4822}
4823
4824void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4825 u8 sb_index, u8 disable, u16 usec)
4826{
4827 int port = BP_PORT(bp);
4828 u8 ticks = usec / BNX2X_BTR;
4829
4830 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4831
4832 disable = disable ? 1 : (usec ? 0 : 1);
4833 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4834}