1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/interrupt.h>
21 #include <linux/ip.h>
22 #include <net/ipv6.h>
23 #include <net/ip6_checksum.h>
24 #include <linux/firmware.h>
25 #include <linux/prefetch.h>
26 #include "bnx2x_cmn.h"
27 #include "bnx2x_init.h"
28 #include "bnx2x_sp.h"
29
30
31
32 /**
33 * bnx2x_bz_fp - zero content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @index: fastpath index to be zeroed
37 *
38 * Makes sure the contents of bp->fp[index].napi are kept
39 * intact.
40 */
41 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42 {
43 struct bnx2x_fastpath *fp = &bp->fp[index];
44 struct napi_struct orig_napi = fp->napi;
45 /* bzero bnx2x_fastpath contents */
46 memset(fp, 0, sizeof(*fp));
47
48 /* Restore the NAPI object as it has been already initialized */
49 fp->napi = orig_napi;
50
51 fp->bp = bp;
52 fp->index = index;
53 if (IS_ETH_FP(fp))
54 fp->max_cos = bp->max_cos;
55 else
56 /* Special queues support only one CoS */
57 fp->max_cos = 1;
58
59 /*
60 * Set the tpa flag for each queue. The tpa flag determines the queue's
61 * minimal size, so it must be set prior to queue memory allocation.
62 */
63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64
65 #ifdef BCM_CNIC
66 /* We don't want TPA on an FCoE L2 ring */
67 if (IS_FCOE_FP(fp))
68 fp->disable_tpa = 1;
69 #endif
70 }
71
72 /**
73 * bnx2x_move_fp - move content of the fastpath structure.
74 *
75 * @bp: driver handle
76 * @from: source FP index
77 * @to: destination FP index
78 *
79 * Makes sure the contents of bp->fp[to].napi are kept
80 * intact.
81 */
82 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
83 {
84 struct bnx2x_fastpath *from_fp = &bp->fp[from];
85 struct bnx2x_fastpath *to_fp = &bp->fp[to];
86 struct napi_struct orig_napi = to_fp->napi;
87 /* Move bnx2x_fastpath contents */
88 memcpy(to_fp, from_fp, sizeof(*to_fp));
89 to_fp->index = to;
90
91 /* Restore the NAPI object as it has been already initialized */
92 to_fp->napi = orig_napi;
93 }
94
95 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
96
97 /* free skb in the packet ring at pos idx
98 * return idx of last bd freed
99 */
100 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
101 u16 idx)
102 {
103 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
104 struct eth_tx_start_bd *tx_start_bd;
105 struct eth_tx_bd *tx_data_bd;
106 struct sk_buff *skb = tx_buf->skb;
107 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
108 int nbd;
109
110 /* prefetch skb end pointer to speed up dev_kfree_skb() */
111 prefetch(&skb->end);
112
113 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
114 txdata->txq_index, idx, tx_buf, skb);
115
116 /* unmap first bd */
117 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
118 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
119 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
120 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
121
122
123 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
124 #ifdef BNX2X_STOP_ON_ERROR
125 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
126 BNX2X_ERR("BAD nbd!\n");
127 bnx2x_panic();
128 }
129 #endif
130 new_cons = nbd + tx_buf->first_bd;
131
132 /* Get the next bd */
133 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
134
135 /* Skip a parse bd... */
136 --nbd;
137 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
138
139 /* ...and the TSO split header bd since they have no mapping */
140 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
141 --nbd;
142 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
143 }
144
145 /* now free frags */
146 while (nbd > 0) {
147
148 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
149 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
150 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
151 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
152 if (--nbd)
153 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
154 }
155
156 /* release skb */
157 WARN_ON(!skb);
158 dev_kfree_skb_any(skb);
159 tx_buf->first_bd = 0;
160 tx_buf->skb = NULL;
161
162 return new_cons;
163 }
164
165 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
166 {
167 struct netdev_queue *txq;
168 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
169
170 #ifdef BNX2X_STOP_ON_ERROR
171 if (unlikely(bp->panic))
172 return -1;
173 #endif
174
175 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
176 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
177 sw_cons = txdata->tx_pkt_cons;
178
179 while (sw_cons != hw_cons) {
180 u16 pkt_cons;
181
182 pkt_cons = TX_BD(sw_cons);
183
184 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
185 " pkt_cons %u\n",
186 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
187
188 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
189 sw_cons++;
190 }
191
192 txdata->tx_pkt_cons = sw_cons;
193 txdata->tx_bd_cons = bd_cons;
194
195 /* Need to make the tx_bd_cons update visible to start_xmit()
196 * before checking for netif_tx_queue_stopped(). Without the
197 * memory barrier, there is a small possibility that
198 * start_xmit() will miss it and cause the queue to be stopped
199 * forever.
200 * On the other hand we need an rmb() here to ensure the proper
201 * ordering of bit testing in the following
202 * netif_tx_queue_stopped(txq) call.
203 */
204 smp_mb();
205
206 if (unlikely(netif_tx_queue_stopped(txq))) {
207 /* Taking tx_lock() is needed to prevent reenabling the queue
208 * while it's empty. This could happen if rx_action() gets
209 * suspended in bnx2x_tx_int() after the condition before
210 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
211 *
212 * stops the queue->sees fresh tx_bd_cons->releases the queue->
213 * sends some packets consuming the whole queue again->
214 * stops the queue
215 */
216
217 __netif_tx_lock(txq, smp_processor_id());
218
219 if ((netif_tx_queue_stopped(txq)) &&
220 (bp->state == BNX2X_STATE_OPEN) &&
221 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
222 netif_tx_wake_queue(txq);
223
224 __netif_tx_unlock(txq);
225 }
226 return 0;
227 }
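/* A minimal standalone sketch (not driver code; the struct and names below
 * are made up) of the stop/wake protocol used above: the completion path
 * publishes the new consumer index, issues a full memory barrier, and only
 * then re-checks the "stopped" flag, so a racing transmit path cannot stop
 * the queue forever after missing an update.
 */
struct example_txq {
	unsigned short prod;
	unsigned short cons;
	int stopped;
};

static void example_tx_completion(struct example_txq *q, unsigned short hw_cons,
				  unsigned short ring_size)
{
	q->cons = hw_cons;		/* publish the new consumer index */
	__sync_synchronize();		/* full barrier, like smp_mb() above */
	if (q->stopped &&		/* re-check only after the barrier */
	    (unsigned short)(ring_size - (unsigned short)(q->prod - q->cons)) > 16)
		q->stopped = 0;		/* enough room again: wake the queue */
}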
228
229 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
230 u16 idx)
231 {
232 u16 last_max = fp->last_max_sge;
233
234 if (SUB_S16(idx, last_max) > 0)
235 fp->last_max_sge = idx;
236 }
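/* Worked example of the signed 16-bit comparison above, assuming SUB_S16()
 * subtracts the values as s16: with idx = 2 and last_max = 65534 (i.e. -2
 * as s16), SUB_S16(2, 65534) = 2 - (-2) = 4 > 0, so last_max_sge is still
 * advanced correctly across the 16-bit wrap-around.
 */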
237
238 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
239 struct eth_fast_path_rx_cqe *fp_cqe)
240 {
241 struct bnx2x *bp = fp->bp;
242 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
243 le16_to_cpu(fp_cqe->len_on_bd)) >>
244 SGE_PAGE_SHIFT;
245 u16 last_max, last_elem, first_elem;
246 u16 delta = 0;
247 u16 i;
248
249 if (!sge_len)
250 return;
251
252 /* First mark all used pages */
253 for (i = 0; i < sge_len; i++)
254 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
255 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
256
257 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
258 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
259
260 /* Here we assume that the last SGE index is the biggest */
261 prefetch((void *)(fp->sge_mask));
262 bnx2x_update_last_max_sge(fp,
263 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
264
265 last_max = RX_SGE(fp->last_max_sge);
266 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
267 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
268
269 /* If ring is not full */
270 if (last_elem + 1 != first_elem)
271 last_elem++;
272
273 /* Now update the prod */
274 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
275 if (likely(fp->sge_mask[i]))
276 break;
277
278 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
279 delta += BIT_VEC64_ELEM_SZ;
280 }
281
282 if (delta > 0) {
283 fp->rx_sge_prod += delta;
284 /* clear page-end entries */
285 bnx2x_clear_sge_mask_next_elems(fp);
286 }
287
288 DP(NETIF_MSG_RX_STATUS,
289 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
290 fp->last_max_sge, fp->rx_sge_prod);
291 }
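/* A simplified standalone sketch (made-up names and sizes) of the producer
 * update above. Each 64-bit mask element tracks 64 SGEs; a bit is cleared
 * when its SGE has been consumed. The producer only advances over elements
 * whose mask reached zero, refilling them to all-ones on the way.
 */
static unsigned short example_advance_sge_prod(unsigned long long *mask,
					       unsigned int nelems,
					       unsigned short prod,
					       unsigned short last_max)
{
	unsigned int first = (prod / 64) % nelems;
	unsigned int last = ((last_max / 64) + 1) % nelems;
	unsigned int i;
	unsigned short delta = 0;

	for (i = first; i != last; i = (i + 1) % nelems) {
		if (mask[i])		/* some SGE here is not consumed yet */
			break;
		mask[i] = ~0ULL;	/* whole element can be reused */
		delta += 64;
	}
	return prod + delta;
}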
292
293 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
294 struct sk_buff *skb, u16 cons, u16 prod,
295 struct eth_fast_path_rx_cqe *cqe)
296 {
297 struct bnx2x *bp = fp->bp;
298 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
299 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
300 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
301 dma_addr_t mapping;
302 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
303 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
304
305 /* print error if current state != stop */
306 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
307 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
308
309 /* Try to map an empty skb from the aggregation info */
310 mapping = dma_map_single(&bp->pdev->dev,
311 first_buf->skb->data,
312 fp->rx_buf_size, DMA_FROM_DEVICE);
313 /*
314 * ...if it fails - move the skb from the consumer to the producer
315 * and set the current aggregation state as ERROR to drop it
316 * when TPA_STOP arrives.
317 */
318
319 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
320 /* Move the BD from the consumer to the producer */
321 bnx2x_reuse_rx_skb(fp, cons, prod);
322 tpa_info->tpa_state = BNX2X_TPA_ERROR;
323 return;
324 }
325
326 /* move empty skb from pool to prod */
327 prod_rx_buf->skb = first_buf->skb;
328 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
329 /* point prod_bd to new skb */
330 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
331 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
332
333 /* move partial skb from cons to pool (don't unmap yet) */
334 *first_buf = *cons_rx_buf;
335
336 /* mark bin state as START */
337 tpa_info->parsing_flags =
338 le16_to_cpu(cqe->pars_flags.flags);
339 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
340 tpa_info->tpa_state = BNX2X_TPA_START;
341 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
342 tpa_info->placement_offset = cqe->placement_offset;
343
344 #ifdef BNX2X_STOP_ON_ERROR
345 fp->tpa_queue_used |= (1 << queue);
346 #ifdef _ASM_GENERIC_INT_L64_H
347 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
348 #else
349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
350 #endif
351 fp->tpa_queue_used);
352 #endif
353 }
354
355 /* Timestamp option length allowed for TPA aggregation:
356 *
357 * nop nop kind length echo val
358 */
359 #define TPA_TSTAMP_OPT_LEN 12
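/* i.e. 1 (nop) + 1 (nop) + 1 (kind) + 1 (length) + 4 (TS value) +
 * 4 (TS echo reply) = 12 bytes of TCP option space.
 */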
360 /**
361 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
362 *
363 * @bp: driver handle
364 * @parsing_flags: parsing flags from the START CQE
365 * @len_on_bd: total length of the first packet for the
366 * aggregation.
367 *
368 * Approximate value of the MSS for this aggregation, calculated using
369 * its first packet.
370 */
371 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
372 u16 len_on_bd)
373 {
374 /*
375 * A TPA aggregation won't have IP options or TCP options other
376 * than a timestamp, or IPv6 extension headers.
377 */
378 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
379
380 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
381 PRS_FLAG_OVERETH_IPV6)
382 hdrs_len += sizeof(struct ipv6hdr);
383 else /* IPv4 */
384 hdrs_len += sizeof(struct iphdr);
385
386
387 /* Check if there was a TCP timestamp; if there was, it will
388 * always be 12 bytes long: nop nop kind length echo val.
389 *
390 * Otherwise the FW would close the aggregation.
391 */
392 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
393 hdrs_len += TPA_TSTAMP_OPT_LEN;
394
395 return len_on_bd - hdrs_len;
396 }
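/* Worked example for the calculation above (values assumed): for an IPv4
 * aggregation with a TCP timestamp and len_on_bd = 1514,
 *   hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 *              sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66,
 * so the reported MSS is 1514 - 66 = 1448.
 */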
397
398 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
399 u16 queue, struct sk_buff *skb,
400 struct eth_end_agg_rx_cqe *cqe,
401 u16 cqe_idx)
402 {
403 struct sw_rx_page *rx_pg, old_rx_pg;
404 u32 i, frag_len, frag_size, pages;
405 int err;
406 int j;
407 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
408 u16 len_on_bd = tpa_info->len_on_bd;
409
410 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
411 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
412
413 /* This is needed in order to enable forwarding support */
414 if (frag_size)
415 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
416 tpa_info->parsing_flags, len_on_bd);
417
418 #ifdef BNX2X_STOP_ON_ERROR
419 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
420 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
421 pages, cqe_idx);
422 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
423 bnx2x_panic();
424 return -EINVAL;
425 }
426 #endif
427
428 /* Run through the SGL and compose the fragmented skb */
429 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
430 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
431
432 /* FW gives the indices of the SGE as if the ring is an array
433 (meaning that "next" element will consume 2 indices) */
434 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
435 rx_pg = &fp->rx_page_ring[sge_idx];
436 old_rx_pg = *rx_pg;
437
438 /* If we fail to allocate a substitute page, we simply stop
439 where we are and drop the whole packet */
440 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
441 if (unlikely(err)) {
442 fp->eth_q_stats.rx_skb_alloc_failed++;
443 return err;
444 }
445
446 /* Unmap the page as we are going to pass it to the stack */
447 dma_unmap_page(&bp->pdev->dev,
448 dma_unmap_addr(&old_rx_pg, mapping),
449 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
450
451 /* Add one frag and update the appropriate fields in the skb */
452 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
453
454 skb->data_len += frag_len;
455 skb->truesize += frag_len;
456 skb->len += frag_len;
457
458 frag_size -= frag_len;
459 }
460
461 return 0;
462 }
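/* A standalone sketch (chunk size assumed) of how the loop above splits the
 * remaining aggregation length across SGE pages: every SGE contributes at
 * most one full chunk and the last one carries the tail.
 */
static unsigned int example_count_sge_frags(unsigned int frag_size)
{
	const unsigned int chunk = 4096;	/* stands in for SGE_PAGE_SIZE * PAGES_PER_SGE */
	unsigned int nfrags = 0;

	while (frag_size) {
		unsigned int frag_len = frag_size < chunk ? frag_size : chunk;

		frag_size -= frag_len;
		nfrags++;	/* e.g. frag_size = 9000 -> 3 frags: 4096, 4096, 808 */
	}
	return nfrags;
}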
463
464 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
465 u16 queue, struct eth_end_agg_rx_cqe *cqe,
466 u16 cqe_idx)
467 {
468 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
469 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
470 u8 pad = tpa_info->placement_offset;
471 u16 len = tpa_info->len_on_bd;
472 struct sk_buff *skb = rx_buf->skb;
473 /* alloc new skb */
474 struct sk_buff *new_skb;
475 u8 old_tpa_state = tpa_info->tpa_state;
476
477 tpa_info->tpa_state = BNX2X_TPA_STOP;
478
479 /* If there was an error during the handling of the TPA_START -
480 * drop this aggregation.
481 */
482 if (old_tpa_state == BNX2X_TPA_ERROR)
483 goto drop;
484
485 /* Try to allocate the new skb */
486 new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
487
488 /* Unmap skb in the pool anyway, as we are going to change
489 pool entry status to BNX2X_TPA_STOP even if new skb allocation
490 fails. */
491 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
492 fp->rx_buf_size, DMA_FROM_DEVICE);
493
494 if (likely(new_skb)) {
495 prefetch(skb);
496 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
497
498 #ifdef BNX2X_STOP_ON_ERROR
499 if (pad + len > fp->rx_buf_size) {
500 BNX2X_ERR("skb_put is about to fail... "
501 "pad %d len %d rx_buf_size %d\n",
502 pad, len, fp->rx_buf_size);
503 bnx2x_panic();
504 return;
505 }
506 #endif
507
508 skb_reserve(skb, pad);
509 skb_put(skb, len);
510
511 skb->protocol = eth_type_trans(skb, bp->dev);
512 skb->ip_summed = CHECKSUM_UNNECESSARY;
513
514 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
515 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
516 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
517 napi_gro_receive(&fp->napi, skb);
518 } else {
519 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
520 " - dropping packet!\n");
521 dev_kfree_skb_any(skb);
522 }
523
524
525 /* put new skb in bin */
526 rx_buf->skb = new_skb;
527
528 return;
529 }
530
531 drop:
532 /* drop the packet and keep the buffer in the bin */
533 DP(NETIF_MSG_RX_STATUS,
534 "Failed to allocate or map a new skb - dropping packet!\n");
535 fp->eth_q_stats.rx_skb_alloc_failed++;
536 }
537
538 /* Set Toeplitz hash value in the skb using the value from the
539 * CQE (calculated by HW).
540 */
541 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
542 struct sk_buff *skb)
543 {
544 /* Set Toeplitz hash from CQE */
545 if ((bp->dev->features & NETIF_F_RXHASH) &&
546 (cqe->fast_path_cqe.status_flags &
547 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
548 skb->rxhash =
549 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
550 }
551
552 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
553 {
554 struct bnx2x *bp = fp->bp;
555 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
556 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
557 int rx_pkt = 0;
558
559 #ifdef BNX2X_STOP_ON_ERROR
560 if (unlikely(bp->panic))
561 return 0;
562 #endif
563
564 /* A CQ "next element" is the same size as a regular element,
565 that's why it's ok here */
566 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
567 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
568 hw_comp_cons++;
569
570 bd_cons = fp->rx_bd_cons;
571 bd_prod = fp->rx_bd_prod;
572 bd_prod_fw = bd_prod;
573 sw_comp_cons = fp->rx_comp_cons;
574 sw_comp_prod = fp->rx_comp_prod;
575
576 /* Memory barrier necessary as speculative reads of the rx
577 * buffer can be ahead of the index in the status block
578 */
579 rmb();
580
581 DP(NETIF_MSG_RX_STATUS,
582 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
583 fp->index, hw_comp_cons, sw_comp_cons);
584
585 while (sw_comp_cons != hw_comp_cons) {
586 struct sw_rx_bd *rx_buf = NULL;
587 struct sk_buff *skb;
588 union eth_rx_cqe *cqe;
589 struct eth_fast_path_rx_cqe *cqe_fp;
590 u8 cqe_fp_flags;
591 enum eth_rx_cqe_type cqe_fp_type;
592 u16 len, pad;
593
594 #ifdef BNX2X_STOP_ON_ERROR
595 if (unlikely(bp->panic))
596 return 0;
597 #endif
598
599 comp_ring_cons = RCQ_BD(sw_comp_cons);
600 bd_prod = RX_BD(bd_prod);
601 bd_cons = RX_BD(bd_cons);
602
603 /* Prefetch the page containing the BD descriptor
604 at the producer's index. It will be needed when a new skb is
605 allocated */
606 prefetch((void *)(PAGE_ALIGN((unsigned long)
607 (&fp->rx_desc_ring[bd_prod])) -
608 PAGE_SIZE + 1));
609
610 cqe = &fp->rx_comp_ring[comp_ring_cons];
611 cqe_fp = &cqe->fast_path_cqe;
612 cqe_fp_flags = cqe_fp->type_error_flags;
613 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
614
615 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
616 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
617 cqe_fp_flags, cqe_fp->status_flags,
618 le32_to_cpu(cqe_fp->rss_hash_result),
619 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
620
621 /* is this a slowpath msg? */
622 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
623 bnx2x_sp_event(fp, cqe);
624 goto next_cqe;
625
626 /* this is an rx packet */
627 } else {
628 rx_buf = &fp->rx_buf_ring[bd_cons];
629 skb = rx_buf->skb;
630 prefetch(skb);
631
632 if (!CQE_TYPE_FAST(cqe_fp_type)) {
633 #ifdef BNX2X_STOP_ON_ERROR
634 /* sanity check */
635 if (fp->disable_tpa &&
636 (CQE_TYPE_START(cqe_fp_type) ||
637 CQE_TYPE_STOP(cqe_fp_type)))
638 BNX2X_ERR("START/STOP packet while "
639 "disable_tpa type %x\n",
640 CQE_TYPE(cqe_fp_type));
641 #endif
642
643 if (CQE_TYPE_START(cqe_fp_type)) {
644 u16 queue = cqe_fp->queue_index;
645 DP(NETIF_MSG_RX_STATUS,
646 "calling tpa_start on queue %d\n",
647 queue);
648
649 bnx2x_tpa_start(fp, queue, skb,
650 bd_cons, bd_prod,
651 cqe_fp);
652
653 /* Set Toeplitz hash for LRO skb */
654 bnx2x_set_skb_rxhash(bp, cqe, skb);
655
656 goto next_rx;
657
658 } else {
659 u16 queue =
660 cqe->end_agg_cqe.queue_index;
661 DP(NETIF_MSG_RX_STATUS,
662 "calling tpa_stop on queue %d\n",
663 queue);
664
665 bnx2x_tpa_stop(bp, fp, queue,
666 &cqe->end_agg_cqe,
667 comp_ring_cons);
668 #ifdef BNX2X_STOP_ON_ERROR
669 if (bp->panic)
670 return 0;
671 #endif
672
673 bnx2x_update_sge_prod(fp, cqe_fp);
674 goto next_cqe;
675 }
676 }
677 /* non TPA */
678 len = le16_to_cpu(cqe_fp->pkt_len);
679 pad = cqe_fp->placement_offset;
680 dma_sync_single_for_cpu(&bp->pdev->dev,
681 dma_unmap_addr(rx_buf, mapping),
682 pad + RX_COPY_THRESH,
683 DMA_FROM_DEVICE);
684 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
685
686 /* is this an error packet? */
687 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
688 DP(NETIF_MSG_RX_ERR,
689 "ERROR flags %x rx packet %u\n",
690 cqe_fp_flags, sw_comp_cons);
691 fp->eth_q_stats.rx_err_discard_pkt++;
692 goto reuse_rx;
693 }
694
695 /* Since we don't have a jumbo ring
696 * copy small packets if mtu > 1500
697 */
698 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
699 (len <= RX_COPY_THRESH)) {
700 struct sk_buff *new_skb;
701
702 new_skb = netdev_alloc_skb(bp->dev, len + pad);
703 if (new_skb == NULL) {
704 DP(NETIF_MSG_RX_ERR,
705 "ERROR packet dropped "
706 "because of alloc failure\n");
707 fp->eth_q_stats.rx_skb_alloc_failed++;
708 goto reuse_rx;
709 }
710
711 /* aligned copy */
712 skb_copy_from_linear_data_offset(skb, pad,
713 new_skb->data + pad, len);
714 skb_reserve(new_skb, pad);
715 skb_put(new_skb, len);
716
717 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
718
719 skb = new_skb;
720
721 } else
722 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
723 dma_unmap_single(&bp->pdev->dev,
724 dma_unmap_addr(rx_buf, mapping),
725 fp->rx_buf_size,
726 DMA_FROM_DEVICE);
727 skb_reserve(skb, pad);
728 skb_put(skb, len);
729
730 } else {
731 DP(NETIF_MSG_RX_ERR,
732 "ERROR packet dropped because "
733 "of alloc failure\n");
734 fp->eth_q_stats.rx_skb_alloc_failed++;
735 reuse_rx:
736 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
737 goto next_rx;
738 }
739
740 skb->protocol = eth_type_trans(skb, bp->dev);
741
742 /* Set Toeplitz hash for a non-LRO skb */
743 bnx2x_set_skb_rxhash(bp, cqe, skb);
744
745 skb_checksum_none_assert(skb);
746
747 if (bp->dev->features & NETIF_F_RXCSUM) {
748
749 if (likely(BNX2X_RX_CSUM_OK(cqe)))
750 skb->ip_summed = CHECKSUM_UNNECESSARY;
751 else
752 fp->eth_q_stats.hw_csum_err++;
753 }
754 }
755
756 skb_record_rx_queue(skb, fp->index);
757
758 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
759 PARSING_FLAGS_VLAN)
760 __vlan_hwaccel_put_tag(skb,
761 le16_to_cpu(cqe_fp->vlan_tag));
762 napi_gro_receive(&fp->napi, skb);
763
764
765 next_rx:
766 rx_buf->skb = NULL;
767
768 bd_cons = NEXT_RX_IDX(bd_cons);
769 bd_prod = NEXT_RX_IDX(bd_prod);
770 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
771 rx_pkt++;
772 next_cqe:
773 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
774 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
775
776 if (rx_pkt == budget)
777 break;
778 } /* while */
779
780 fp->rx_bd_cons = bd_cons;
781 fp->rx_bd_prod = bd_prod_fw;
782 fp->rx_comp_cons = sw_comp_cons;
783 fp->rx_comp_prod = sw_comp_prod;
784
785 /* Update producers */
786 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
787 fp->rx_sge_prod);
788
789 fp->rx_pkt += rx_pkt;
790 fp->rx_calls++;
791
792 return rx_pkt;
793 }
794
795 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
796 {
797 struct bnx2x_fastpath *fp = fp_cookie;
798 struct bnx2x *bp = fp->bp;
799 u8 cos;
800
801 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
802 "[fp %d fw_sd %d igusb %d]\n",
803 fp->index, fp->fw_sb_id, fp->igu_sb_id);
804 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
805
806 #ifdef BNX2X_STOP_ON_ERROR
807 if (unlikely(bp->panic))
808 return IRQ_HANDLED;
809 #endif
810
811 /* Handle Rx and Tx according to MSI-X vector */
812 prefetch(fp->rx_cons_sb);
813
814 for_each_cos_in_tx_queue(fp, cos)
815 prefetch(fp->txdata[cos].tx_cons_sb);
816
817 prefetch(&fp->sb_running_index[SM_RX_ID]);
818 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
819
820 return IRQ_HANDLED;
821 }
822
823 /* HW Lock for shared dual port PHYs */
824 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
825 {
826 mutex_lock(&bp->port.phy_mutex);
827
828 if (bp->port.need_hw_lock)
829 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
830 }
831
832 void bnx2x_release_phy_lock(struct bnx2x *bp)
833 {
834 if (bp->port.need_hw_lock)
835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
836
837 mutex_unlock(&bp->port.phy_mutex);
838 }
839
840 /* calculates MF speed according to the current line speed and MF configuration */
841 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
842 {
843 u16 line_speed = bp->link_vars.line_speed;
844 if (IS_MF(bp)) {
845 u16 maxCfg = bnx2x_extract_max_cfg(bp,
846 bp->mf_config[BP_VN(bp)]);
847
848 /* Calculate the current MAX line speed limit for the MF
849 * devices
850 */
851 if (IS_MF_SI(bp))
852 line_speed = (line_speed * maxCfg) / 100;
853 else { /* SD mode */
854 u16 vn_max_rate = maxCfg * 100;
855
856 if (vn_max_rate < line_speed)
857 line_speed = vn_max_rate;
858 }
859 }
860
861 return line_speed;
862 }
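/* Worked example for the calculation above (numbers assumed): with a
 * 10000 Mbps physical link and maxCfg = 25,
 *   - MF SI mode reports 10000 * 25 / 100 = 2500 Mbps,
 *   - MF SD mode caps the speed at vn_max_rate = 25 * 100 = 2500 Mbps.
 */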
863
864 /**
865 * bnx2x_fill_report_data - fill the link report data
866 *
867 * @bp: driver handle
868 * @data: link state to update
869 *
870 * It uses non-atomic bit operations because it is called under the mutex.
871 */
872 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
873 struct bnx2x_link_report_data *data)
874 {
875 u16 line_speed = bnx2x_get_mf_speed(bp);
876
877 memset(data, 0, sizeof(*data));
878
879 /* Fill the report data: effective line speed */
880 data->line_speed = line_speed;
881
882 /* Link is down */
883 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
884 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
885 &data->link_report_flags);
886
887 /* Full DUPLEX */
888 if (bp->link_vars.duplex == DUPLEX_FULL)
889 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
890
891 /* Rx Flow Control is ON */
892 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
893 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
894
895 /* Tx Flow Control is ON */
896 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
897 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
898 }
899
900 /**
901 * bnx2x_link_report - report link status to OS.
902 *
903 * @bp: driver handle
904 *
905 * Calls the __bnx2x_link_report() under the same locking scheme
906 * as the link/PHY state managing code to ensure consistent link
907 * reporting.
908 */
909
910 void bnx2x_link_report(struct bnx2x *bp)
911 {
912 bnx2x_acquire_phy_lock(bp);
913 __bnx2x_link_report(bp);
914 bnx2x_release_phy_lock(bp);
915 }
916
917 /**
918 * __bnx2x_link_report - report link status to OS.
919 *
920 * @bp: driver handle
921 *
922 * Non-atomic implementation.
923 * Should be called under the phy_lock.
924 */
925 void __bnx2x_link_report(struct bnx2x *bp)
926 {
927 struct bnx2x_link_report_data cur_data;
928
929 /* reread mf_cfg */
930 if (!CHIP_IS_E1(bp))
931 bnx2x_read_mf_cfg(bp);
932
933 /* Read the current link report info */
934 bnx2x_fill_report_data(bp, &cur_data);
935
936 /* Don't report link down or exactly the same link status twice */
937 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
938 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
939 &bp->last_reported_link.link_report_flags) &&
940 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
941 &cur_data.link_report_flags)))
942 return;
943
944 bp->link_cnt++;
945
946 /* We are going to report new link parameters now -
947 * remember the current data for the next time.
948 */
949 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
950
951 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
952 &cur_data.link_report_flags)) {
953 netif_carrier_off(bp->dev);
954 netdev_err(bp->dev, "NIC Link is Down\n");
955 return;
956 } else {
957 netif_carrier_on(bp->dev);
958 netdev_info(bp->dev, "NIC Link is Up, ");
959 pr_cont("%d Mbps ", cur_data.line_speed);
960
961 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
962 &cur_data.link_report_flags))
963 pr_cont("full duplex");
964 else
965 pr_cont("half duplex");
966
967 /* Handle the FC at the end so that only these flags would be
968 * possibly set. This way we may easily check if there is no FC
969 * enabled.
970 */
971 if (cur_data.link_report_flags) {
972 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
973 &cur_data.link_report_flags)) {
974 pr_cont(", receive ");
975 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
976 &cur_data.link_report_flags))
977 pr_cont("& transmit ");
978 } else {
979 pr_cont(", transmit ");
980 }
981 pr_cont("flow control ON");
982 }
983 pr_cont("\n");
984 }
985 }
986
987 void bnx2x_init_rx_rings(struct bnx2x *bp)
988 {
989 int func = BP_FUNC(bp);
990 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
991 ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
992 u16 ring_prod;
993 int i, j;
994
995 /* Allocate TPA resources */
996 for_each_rx_queue(bp, j) {
997 struct bnx2x_fastpath *fp = &bp->fp[j];
998
999 DP(NETIF_MSG_IFUP,
1000 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1001
1002 if (!fp->disable_tpa) {
1003 /* Fill the per-aggregation pool */
1004 for (i = 0; i < max_agg_queues; i++) {
1005 struct bnx2x_agg_info *tpa_info =
1006 &fp->tpa_info[i];
1007 struct sw_rx_bd *first_buf =
1008 &tpa_info->first_buf;
1009
1010 first_buf->skb = netdev_alloc_skb(bp->dev,
1011 fp->rx_buf_size);
1012 if (!first_buf->skb) {
1013 BNX2X_ERR("Failed to allocate TPA "
1014 "skb pool for queue[%d] - "
1015 "disabling TPA on this "
1016 "queue!\n", j);
1017 bnx2x_free_tpa_pool(bp, fp, i);
1018 fp->disable_tpa = 1;
1019 break;
1020 }
1021 dma_unmap_addr_set(first_buf, mapping, 0);
1022 tpa_info->tpa_state = BNX2X_TPA_STOP;
1023 }
1024
1025 /* "next page" elements initialization */
1026 bnx2x_set_next_page_sgl(fp);
1027
1028 /* set SGEs bit mask */
1029 bnx2x_init_sge_ring_bit_mask(fp);
1030
1031 /* Allocate SGEs and initialize the ring elements */
1032 for (i = 0, ring_prod = 0;
1033 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1034
1035 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1036 BNX2X_ERR("was only able to allocate "
1037 "%d rx sges\n", i);
1038 BNX2X_ERR("disabling TPA for "
1039 "queue[%d]\n", j);
1040 /* Cleanup already allocated elements */
1041 bnx2x_free_rx_sge_range(bp, fp,
1042 ring_prod);
1043 bnx2x_free_tpa_pool(bp, fp,
1044 max_agg_queues);
1045 fp->disable_tpa = 1;
1046 ring_prod = 0;
1047 break;
1048 }
1049 ring_prod = NEXT_SGE_IDX(ring_prod);
1050 }
1051
1052 fp->rx_sge_prod = ring_prod;
1053 }
1054 }
1055
1056 for_each_rx_queue(bp, j) {
1057 struct bnx2x_fastpath *fp = &bp->fp[j];
1058
1059 fp->rx_bd_cons = 0;
1060
1061 /* Activate BD ring */
1062 /* Warning!
1063 * This will generate an interrupt (to the TSTORM);
1064 * it must only be done after the chip is initialized.
1065 */
1066 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1067 fp->rx_sge_prod);
1068
1069 if (j != 0)
1070 continue;
1071
1072 if (CHIP_IS_E1(bp)) {
1073 REG_WR(bp, BAR_USTRORM_INTMEM +
1074 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1075 U64_LO(fp->rx_comp_mapping));
1076 REG_WR(bp, BAR_USTRORM_INTMEM +
1077 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1078 U64_HI(fp->rx_comp_mapping));
1079 }
1080 }
1081 }
1082
1083 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1084 {
1085 int i;
1086 u8 cos;
1087
1088 for_each_tx_queue(bp, i) {
1089 struct bnx2x_fastpath *fp = &bp->fp[i];
1090 for_each_cos_in_tx_queue(fp, cos) {
1091 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1092
1093 u16 bd_cons = txdata->tx_bd_cons;
1094 u16 sw_prod = txdata->tx_pkt_prod;
1095 u16 sw_cons = txdata->tx_pkt_cons;
1096
1097 while (sw_cons != sw_prod) {
1098 bd_cons = bnx2x_free_tx_pkt(bp, txdata,
1099 TX_BD(sw_cons));
1100 sw_cons++;
1101 }
1102 }
1103 }
1104 }
1105
1106 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1107 {
1108 struct bnx2x *bp = fp->bp;
1109 int i;
1110
1111 /* ring wasn't allocated */
1112 if (fp->rx_buf_ring == NULL)
1113 return;
1114
1115 for (i = 0; i < NUM_RX_BD; i++) {
1116 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1117 struct sk_buff *skb = rx_buf->skb;
1118
1119 if (skb == NULL)
1120 continue;
1121 dma_unmap_single(&bp->pdev->dev,
1122 dma_unmap_addr(rx_buf, mapping),
1123 fp->rx_buf_size, DMA_FROM_DEVICE);
1124
1125 rx_buf->skb = NULL;
1126 dev_kfree_skb(skb);
1127 }
1128 }
1129
1130 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1131 {
1132 int j;
1133
1134 for_each_rx_queue(bp, j) {
1135 struct bnx2x_fastpath *fp = &bp->fp[j];
1136
1137 bnx2x_free_rx_bds(fp);
1138
1139 if (!fp->disable_tpa)
1140 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1141 ETH_MAX_AGGREGATION_QUEUES_E1 :
1142 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
1143 }
1144 }
1145
1146 void bnx2x_free_skbs(struct bnx2x *bp)
1147 {
1148 bnx2x_free_tx_skbs(bp);
1149 bnx2x_free_rx_skbs(bp);
1150 }
1151
1152 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1153 {
1154 /* load old values */
1155 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1156
1157 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1158 /* leave all but MAX value */
1159 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1160
1161 /* set new MAX value */
1162 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1163 & FUNC_MF_CFG_MAX_BW_MASK;
1164
1165 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1166 }
1167 }
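/* A generic standalone sketch of the read-modify-write above (mask/shift
 * parameters assumed): clear the MAX BW field, then OR in the new value
 * shifted into place, leaving all other mf_cfg bits untouched.
 */
static unsigned int example_set_field(unsigned int cfg, unsigned int value,
				      unsigned int mask, unsigned int shift)
{
	cfg &= ~mask;				/* drop the old field value */
	cfg |= (value << shift) & mask;		/* insert the new one */
	return cfg;
}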
1168
1169 /**
1170 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1171 *
1172 * @bp: driver handle
1173 * @nvecs: number of vectors to be released
1174 */
1175 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1176 {
1177 int i, offset = 0;
1178
1179 if (nvecs == offset)
1180 return;
1181 free_irq(bp->msix_table[offset].vector, bp->dev);
1182 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1183 bp->msix_table[offset].vector);
1184 offset++;
1185 #ifdef BCM_CNIC
1186 if (nvecs == offset)
1187 return;
1188 offset++;
1189 #endif
1190
1191 for_each_eth_queue(bp, i) {
1192 if (nvecs == offset)
1193 return;
1194 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1195 "irq\n", i, bp->msix_table[offset].vector);
1196
1197 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1198 }
1199 }
1200
1201 void bnx2x_free_irq(struct bnx2x *bp)
1202 {
1203 if (bp->flags & USING_MSIX_FLAG)
1204 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1205 CNIC_PRESENT + 1);
1206 else if (bp->flags & USING_MSI_FLAG)
1207 free_irq(bp->pdev->irq, bp->dev);
1208 else
1209 free_irq(bp->pdev->irq, bp->dev);
1210 }
1211
1212 int bnx2x_enable_msix(struct bnx2x *bp)
1213 {
1214 int msix_vec = 0, i, rc, req_cnt;
1215
1216 bp->msix_table[msix_vec].entry = msix_vec;
1217 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1218 bp->msix_table[0].entry);
1219 msix_vec++;
1220
1221 #ifdef BCM_CNIC
1222 bp->msix_table[msix_vec].entry = msix_vec;
1223 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1224 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1225 msix_vec++;
1226 #endif
1227 /* We need separate vectors for ETH queues only (not FCoE) */
1228 for_each_eth_queue(bp, i) {
1229 bp->msix_table[msix_vec].entry = msix_vec;
1230 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1231 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1232 msix_vec++;
1233 }
1234
1235 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1236
1237 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1238
1239 /*
1240 * reconfigure number of tx/rx queues according to available
1241 * MSI-X vectors
1242 */
1243 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1244 /* how many fewer vectors will we have? */
1245 int diff = req_cnt - rc;
1246
1247 DP(NETIF_MSG_IFUP,
1248 "Trying to use less MSI-X vectors: %d\n", rc);
1249
1250 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1251
1252 if (rc) {
1253 DP(NETIF_MSG_IFUP,
1254 "MSI-X is not attainable rc %d\n", rc);
1255 return rc;
1256 }
1257 /*
1258 * decrease number of queues by number of unallocated entries
1259 */
1260 bp->num_queues -= diff;
1261
1262 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1263 bp->num_queues);
1264 } else if (rc) {
1265 /* fall back to INTx if there is not enough memory */
1266 if (rc == -ENOMEM)
1267 bp->flags |= DISABLE_MSI_FLAG;
1268 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1269 return rc;
1270 }
1271
1272 bp->flags |= USING_MSIX_FLAG;
1273
1274 return 0;
1275 }
1276
1277 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1278 {
1279 int i, rc, offset = 0;
1280
1281 rc = request_irq(bp->msix_table[offset++].vector,
1282 bnx2x_msix_sp_int, 0,
1283 bp->dev->name, bp->dev);
1284 if (rc) {
1285 BNX2X_ERR("request sp irq failed\n");
1286 return -EBUSY;
1287 }
1288
1289 #ifdef BCM_CNIC
1290 offset++;
1291 #endif
1292 for_each_eth_queue(bp, i) {
1293 struct bnx2x_fastpath *fp = &bp->fp[i];
1294 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1295 bp->dev->name, i);
1296
1297 rc = request_irq(bp->msix_table[offset].vector,
1298 bnx2x_msix_fp_int, 0, fp->name, fp);
1299 if (rc) {
1300 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1301 bp->msix_table[offset].vector, rc);
1302 bnx2x_free_msix_irqs(bp, offset);
1303 return -EBUSY;
1304 }
1305
1306 offset++;
1307 }
1308
1309 i = BNX2X_NUM_ETH_QUEUES(bp);
1310 offset = 1 + CNIC_PRESENT;
1311 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1312 " ... fp[%d] %d\n",
1313 bp->msix_table[0].vector,
1314 0, bp->msix_table[offset].vector,
1315 i - 1, bp->msix_table[offset + i - 1].vector);
1316
1317 return 0;
1318 }
1319
1320 int bnx2x_enable_msi(struct bnx2x *bp)
1321 {
1322 int rc;
1323
1324 rc = pci_enable_msi(bp->pdev);
1325 if (rc) {
1326 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1327 return -1;
1328 }
1329 bp->flags |= USING_MSI_FLAG;
1330
1331 return 0;
1332 }
1333
1334 static int bnx2x_req_irq(struct bnx2x *bp)
1335 {
1336 unsigned long flags;
1337 int rc;
1338
1339 if (bp->flags & USING_MSI_FLAG)
1340 flags = 0;
1341 else
1342 flags = IRQF_SHARED;
1343
1344 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1345 bp->dev->name, bp->dev);
1346 return rc;
1347 }
1348
1349 static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1350 {
1351 int rc = 0;
1352 if (bp->flags & USING_MSIX_FLAG) {
1353 rc = bnx2x_req_msix_irqs(bp);
1354 if (rc)
1355 return rc;
1356 } else {
1357 bnx2x_ack_int(bp);
1358 rc = bnx2x_req_irq(bp);
1359 if (rc) {
1360 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1361 return rc;
1362 }
1363 if (bp->flags & USING_MSI_FLAG) {
1364 bp->dev->irq = bp->pdev->irq;
1365 netdev_info(bp->dev, "using MSI IRQ %d\n",
1366 bp->pdev->irq);
1367 }
1368 }
1369
1370 return 0;
1371 }
1372
1373 static inline void bnx2x_napi_enable(struct bnx2x *bp)
1374 {
1375 int i;
1376
1377 for_each_rx_queue(bp, i)
1378 napi_enable(&bnx2x_fp(bp, i, napi));
1379 }
1380
1381 static inline void bnx2x_napi_disable(struct bnx2x *bp)
1382 {
1383 int i;
1384
1385 for_each_rx_queue(bp, i)
1386 napi_disable(&bnx2x_fp(bp, i, napi));
1387 }
1388
1389 void bnx2x_netif_start(struct bnx2x *bp)
1390 {
1391 if (netif_running(bp->dev)) {
1392 bnx2x_napi_enable(bp);
1393 bnx2x_int_enable(bp);
1394 if (bp->state == BNX2X_STATE_OPEN)
1395 netif_tx_wake_all_queues(bp->dev);
1396 }
1397 }
1398
1399 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1400 {
1401 bnx2x_int_disable_sync(bp, disable_hw);
1402 bnx2x_napi_disable(bp);
1403 }
1404
1405 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1406 {
1407 struct bnx2x *bp = netdev_priv(dev);
1408 #ifdef BCM_CNIC
1409 if (NO_FCOE(bp))
1410 return skb_tx_hash(dev, skb);
1411 else {
1412 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1413 u16 ether_type = ntohs(hdr->h_proto);
1414
1415 /* Skip VLAN tag if present */
1416 if (ether_type == ETH_P_8021Q) {
1417 struct vlan_ethhdr *vhdr =
1418 (struct vlan_ethhdr *)skb->data;
1419
1420 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1421 }
1422
1423 /* If ethertype is FCoE or FIP - use FCoE ring */
1424 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1425 return bnx2x_fcoe_tx(bp, txq_index);
1426 }
1427 #endif
1428 /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1429 */
1430 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1431 }
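/* A simplified standalone sketch of the ethertype parsing in
 * bnx2x_select_queue() above: read the outer ethertype at offset 12; if it
 * is 802.1Q (0x8100) that field is the TPID, and the real ethertype follows
 * the 2-byte VLAN TCI at offset 16.
 */
static unsigned short example_inner_ethertype(const unsigned char *frame)
{
	unsigned short ether_type = (frame[12] << 8) | frame[13];

	if (ether_type == 0x8100)			/* ETH_P_8021Q */
		ether_type = (frame[16] << 8) | frame[17];

	return ether_type;	/* e.g. 0x8906 (FCoE) or 0x8914 (FIP) */
}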
1432
1433 void bnx2x_set_num_queues(struct bnx2x *bp)
1434 {
1435 switch (bp->multi_mode) {
1436 case ETH_RSS_MODE_DISABLED:
1437 bp->num_queues = 1;
1438 break;
1439 case ETH_RSS_MODE_REGULAR:
1440 bp->num_queues = bnx2x_calc_num_queues(bp);
1441 break;
1442
1443 default:
1444 bp->num_queues = 1;
1445 break;
1446 }
1447
1448 /* Add special queues */
1449 bp->num_queues += NON_ETH_CONTEXT_USE;
1450 }
1451
1452 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1453 {
1454 int rc, tx, rx;
1455
1456 tx = MAX_TXQS_PER_COS * bp->max_cos;
1457 rx = BNX2X_NUM_ETH_QUEUES(bp);
1458
1459 /* account for fcoe queue */
1460 #ifdef BCM_CNIC
1461 if (!NO_FCOE(bp)) {
1462 rx += FCOE_PRESENT;
1463 tx += FCOE_PRESENT;
1464 }
1465 #endif
1466
1467 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1468 if (rc) {
1469 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1470 return rc;
1471 }
1472 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1473 if (rc) {
1474 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1475 return rc;
1476 }
1477
1478 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1479 tx, rx);
1480
1481 return rc;
1482 }
1483
1484 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1485 {
1486 int i;
1487
1488 for_each_queue(bp, i) {
1489 struct bnx2x_fastpath *fp = &bp->fp[i];
1490
1491 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1492 if (IS_FCOE_IDX(i))
1493 /*
1494 * Although no IP frames are expected to arrive on
1495 * this ring, we still want to add an
1496 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1497 * overrun attack.
1498 */
1499 fp->rx_buf_size =
1500 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1501 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1502 else
1503 fp->rx_buf_size =
1504 bp->dev->mtu + ETH_OVREHEAD +
1505 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1506 }
1507 }
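/* Illustrative sizing example (overhead values assumed purely for the
 * arithmetic): with a 1500 byte MTU, 18 bytes of Ethernet overhead, 64 bytes
 * of FW Rx alignment and 2 bytes of IP header alignment padding, the Rx
 * buffer would be sized 1500 + 18 + 64 + 2 = 1584 bytes.
 */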
1508
1509 static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1510 {
1511 int i;
1512 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1513 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1514
1515 /*
1516 * Prepare the initial contents of the indirection table if RSS is
1517 * enabled
1518 */
1519 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1520 for (i = 0; i < sizeof(ind_table); i++)
1521 ind_table[i] =
1522 bp->fp->cl_id + (i % num_eth_queues);
1523 }
1524
1525 /*
1526 * For 57710 and 57711 the SEARCHER configuration (rss_keys) is
1527 * per-port, so if explicit configuration is needed, do it only
1528 * for a PMF.
1529 *
1530 * For 57712 and newer on the other hand it's a per-function
1531 * configuration.
1532 */
1533 return bnx2x_config_rss_pf(bp, ind_table,
1534 bp->port.pmf || !CHIP_IS_E1x(bp));
1535 }
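/* A standalone sketch of the indirection table fill above (sizes assumed):
 * with a base client id of 4 and 3 ETH queues, a 128-entry table is filled
 * round-robin as 4, 5, 6, 4, 5, 6, ...
 */
static void example_fill_ind_table(unsigned char *tbl, unsigned int tbl_size,
				   unsigned char base_cl_id, unsigned char nqueues)
{
	unsigned int i;

	for (i = 0; i < tbl_size; i++)
		tbl[i] = base_cl_id + (i % nqueues);
}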
1536
1537 int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1538 {
1539 struct bnx2x_config_rss_params params = {0};
1540 int i;
1541
1542 /* Although RSS is meaningless when there is a single HW queue we
1543 * still need it enabled in order to have HW Rx hash generated.
1544 *
1545 * if (!is_eth_multi(bp))
1546 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1547 */
1548
1549 params.rss_obj = &bp->rss_conf_obj;
1550
1551 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1552
1553 /* RSS mode */
1554 switch (bp->multi_mode) {
1555 case ETH_RSS_MODE_DISABLED:
1556 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1557 break;
1558 case ETH_RSS_MODE_REGULAR:
1559 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1560 break;
1561 case ETH_RSS_MODE_VLAN_PRI:
1562 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1563 break;
1564 case ETH_RSS_MODE_E1HOV_PRI:
1565 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1566 break;
1567 case ETH_RSS_MODE_IP_DSCP:
1568 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1569 break;
1570 default:
1571 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1572 return -EINVAL;
1573 }
1574
1575 /* If RSS is enabled */
1576 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1577 /* RSS configuration */
1578 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1579 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1580 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1581 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1582
1583 /* Hash bits */
1584 params.rss_result_mask = MULTI_MASK;
1585
1586 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1587
1588 if (config_hash) {
1589 /* RSS keys */
1590 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1591 params.rss_key[i] = random32();
1592
1593 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1594 }
1595 }
1596
1597 return bnx2x_config_rss(bp, &params);
1598 }
1599
1600 static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1601 {
1602 struct bnx2x_func_state_params func_params = {0};
1603
1604 /* Prepare parameters for function state transitions */
1605 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1606
1607 func_params.f_obj = &bp->func_obj;
1608 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1609
1610 func_params.params.hw_init.load_phase = load_code;
1611
1612 return bnx2x_func_state_change(bp, &func_params);
1613 }
1614
1615 /*
1616 * Cleans the objects that have internal lists without sending
1617 * ramrods. Should be run when interrupts are disabled.
1618 */
1619 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1620 {
1621 int rc;
1622 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1623 struct bnx2x_mcast_ramrod_params rparam = {0};
1624 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1625
1626 /***************** Cleanup MACs' object first *************************/
1627
1628 /* Wait for completion of the requested commands */
1629 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1630 /* Perform a dry cleanup */
1631 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1632
1633 /* Clean ETH primary MAC */
1634 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1635 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1636 &ramrod_flags);
1637 if (rc != 0)
1638 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1639
1640 /* Cleanup UC list */
1641 vlan_mac_flags = 0;
1642 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1643 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1644 &ramrod_flags);
1645 if (rc != 0)
1646 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1647
1648 /***************** Now clean mcast object *****************************/
1649 rparam.mcast_obj = &bp->mcast_obj;
1650 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1651
1652 /* Add a DEL command... */
1653 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1654 if (rc < 0)
1655 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1656 "object: %d\n", rc);
1657
1658 /* ...and wait until all pending commands are cleared */
1659 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1660 while (rc != 0) {
1661 if (rc < 0) {
1662 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1663 rc);
1664 return;
1665 }
1666
1667 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1668 }
1669 }
1670
1671 #ifndef BNX2X_STOP_ON_ERROR
1672 #define LOAD_ERROR_EXIT(bp, label) \
1673 do { \
1674 (bp)->state = BNX2X_STATE_ERROR; \
1675 goto label; \
1676 } while (0)
1677 #else
1678 #define LOAD_ERROR_EXIT(bp, label) \
1679 do { \
1680 (bp)->state = BNX2X_STATE_ERROR; \
1681 (bp)->panic = 1; \
1682 return -EBUSY; \
1683 } while (0)
1684 #endif
1685
1686 /* must be called with rtnl_lock */
1687 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1688 {
1689 int port = BP_PORT(bp);
1690 u32 load_code;
1691 int i, rc;
1692
1693 #ifdef BNX2X_STOP_ON_ERROR
1694 if (unlikely(bp->panic))
1695 return -EPERM;
1696 #endif
1697
1698 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1699
1700 /* Set the initial link reported state to link down */
1701 bnx2x_acquire_phy_lock(bp);
1702 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1703 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1704 &bp->last_reported_link.link_report_flags);
1705 bnx2x_release_phy_lock(bp);
1706
1707 /* must be called before memory allocation and HW init */
1708 bnx2x_ilt_set_info(bp);
1709
1710 /*
1711 * Zero fastpath structures preserving invariants like napi, which are
1712 * allocated only once, fp index, max_cos, bp pointer.
1713 * Also set fp->disable_tpa.
1714 */
1715 for_each_queue(bp, i)
1716 bnx2x_bz_fp(bp, i);
1717
1718
1719 /* Set the receive queues buffer size */
1720 bnx2x_set_rx_buf_size(bp);
1721
1722 if (bnx2x_alloc_mem(bp))
1723 return -ENOMEM;
1724
1725 /* As long as bnx2x_alloc_mem() may possibly update
1726 * bp->num_queues, bnx2x_set_real_num_queues() should always
1727 * come after it.
1728 */
1729 rc = bnx2x_set_real_num_queues(bp);
1730 if (rc) {
1731 BNX2X_ERR("Unable to set real_num_queues\n");
1732 LOAD_ERROR_EXIT(bp, load_error0);
1733 }
1734
1735 /* Configure multi-CoS mappings in the kernel.
1736 * This configuration may be overridden by a multi-class queue discipline
1737 * or by a DCBX negotiation result.
1738 */
1739 bnx2x_setup_tc(bp->dev, bp->max_cos);
1740
1741 bnx2x_napi_enable(bp);
1742
1743 /* Send LOAD_REQUEST command to MCP
1744 * Returns the type of LOAD command:
1745 * if it is the first port to be initialized
1746 * common blocks should be initialized, otherwise - not
1747 */
1748 if (!BP_NOMCP(bp)) {
1749 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1750 if (!load_code) {
1751 BNX2X_ERR("MCP response failure, aborting\n");
1752 rc = -EBUSY;
1753 LOAD_ERROR_EXIT(bp, load_error1);
1754 }
1755 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1756 rc = -EBUSY; /* other port in diagnostic mode */
1757 LOAD_ERROR_EXIT(bp, load_error1);
1758 }
1759
1760 } else {
1761 int path = BP_PATH(bp);
1762
1763 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1764 path, load_count[path][0], load_count[path][1],
1765 load_count[path][2]);
1766 load_count[path][0]++;
1767 load_count[path][1 + port]++;
1768 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1769 path, load_count[path][0], load_count[path][1],
1770 load_count[path][2]);
1771 if (load_count[path][0] == 1)
1772 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1773 else if (load_count[path][1 + port] == 1)
1774 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1775 else
1776 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1777 }
1778
1779 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1780 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1781 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
1782 bp->port.pmf = 1;
1783 /*
1784 * We need the barrier to ensure the ordering between the
1785 * writing to bp->port.pmf here and reading it from the
1786 * bnx2x_periodic_task().
1787 */
1788 smp_mb();
1789 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1790 } else
1791 bp->port.pmf = 0;
1792
1793 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1794
1795 /* Init Function state controlling object */
1796 bnx2x__init_func_obj(bp);
1797
1798 /* Initialize HW */
1799 rc = bnx2x_init_hw(bp, load_code);
1800 if (rc) {
1801 BNX2X_ERR("HW init failed, aborting\n");
1802 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1803 LOAD_ERROR_EXIT(bp, load_error2);
1804 }
1805
1806 /* Connect to IRQs */
1807 rc = bnx2x_setup_irqs(bp);
1808 if (rc) {
1809 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1810 LOAD_ERROR_EXIT(bp, load_error2);
1811 }
1812
1813 /* Setup NIC internals and enable interrupts */
1814 bnx2x_nic_init(bp, load_code);
1815
1816 /* Init per-function objects */
1817 bnx2x_init_bp_objs(bp);
1818
1819 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1820 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1821 (bp->common.shmem2_base)) {
1822 if (SHMEM2_HAS(bp, dcc_support))
1823 SHMEM2_WR(bp, dcc_support,
1824 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1825 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1826 }
1827
1828 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1829 rc = bnx2x_func_start(bp);
1830 if (rc) {
1831 BNX2X_ERR("Function start failed!\n");
1832 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1833 LOAD_ERROR_EXIT(bp, load_error3);
1834 }
1835
1836 /* Send LOAD_DONE command to MCP */
1837 if (!BP_NOMCP(bp)) {
1838 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1839 if (!load_code) {
1840 BNX2X_ERR("MCP response failure, aborting\n");
1841 rc = -EBUSY;
1842 LOAD_ERROR_EXIT(bp, load_error3);
1843 }
1844 }
1845
1846 rc = bnx2x_setup_leading(bp);
1847 if (rc) {
1848 BNX2X_ERR("Setup leading failed!\n");
1849 LOAD_ERROR_EXIT(bp, load_error3);
1850 }
1851
1852 #ifdef BCM_CNIC
1853 /* Enable Timer scan */
1854 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
1855 #endif
1856
1857 for_each_nondefault_queue(bp, i) {
1858 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
1859 if (rc)
1860 LOAD_ERROR_EXIT(bp, load_error4);
1861 }
1862
1863 rc = bnx2x_init_rss_pf(bp);
1864 if (rc)
1865 LOAD_ERROR_EXIT(bp, load_error4);
1866
1867 /* Now when Clients are configured we are ready to work */
1868 bp->state = BNX2X_STATE_OPEN;
1869
1870 /* Configure a ucast MAC */
1871 rc = bnx2x_set_eth_mac(bp, true);
1872 if (rc)
1873 LOAD_ERROR_EXIT(bp, load_error4);
1874
1875 if (bp->pending_max) {
1876 bnx2x_update_max_mf_config(bp, bp->pending_max);
1877 bp->pending_max = 0;
1878 }
1879
1880 if (bp->port.pmf)
1881 bnx2x_initial_phy_init(bp, load_mode);
1882
1883 /* Start fast path */
1884
1885 /* Initialize Rx filter. */
1886 netif_addr_lock_bh(bp->dev);
1887 bnx2x_set_rx_mode(bp->dev);
1888 netif_addr_unlock_bh(bp->dev);
1889
1890 /* Start the Tx */
1891 switch (load_mode) {
1892 case LOAD_NORMAL:
1893 /* Tx queue should be only reenabled */
1894 netif_tx_wake_all_queues(bp->dev);
1895 break;
1896
1897 case LOAD_OPEN:
1898 netif_tx_start_all_queues(bp->dev);
1899 smp_mb__after_clear_bit();
1900 break;
1901
1902 case LOAD_DIAG:
1903 bp->state = BNX2X_STATE_DIAG;
1904 break;
1905
1906 default:
1907 break;
1908 }
1909
1910 if (!bp->port.pmf)
1911 bnx2x__link_status_update(bp);
1912
1913 /* start the timer */
1914 mod_timer(&bp->timer, jiffies + bp->current_interval);
1915
1916 #ifdef BCM_CNIC
1917 bnx2x_setup_cnic_irq_info(bp);
1918 if (bp->state == BNX2X_STATE_OPEN)
1919 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1920 #endif
1921 bnx2x_inc_load_cnt(bp);
1922
1923 /* Wait for all pending SP commands to complete */
1924 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1925 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1926 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1927 return -EBUSY;
1928 }
1929
1930 bnx2x_dcbx_init(bp);
1931 return 0;
1932
1933 #ifndef BNX2X_STOP_ON_ERROR
1934 load_error4:
1935 #ifdef BCM_CNIC
1936 /* Disable Timer scan */
1937 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
1938 #endif
1939 load_error3:
1940 bnx2x_int_disable_sync(bp, 1);
1941
1942 /* Clean queueable objects */
1943 bnx2x_squeeze_objects(bp);
1944
1945 /* Free SKBs, SGEs, TPA pool and driver internals */
1946 bnx2x_free_skbs(bp);
1947 for_each_rx_queue(bp, i)
1948 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1949
1950 /* Release IRQs */
1951 bnx2x_free_irq(bp);
1952 load_error2:
1953 if (!BP_NOMCP(bp)) {
1954 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1955 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1956 }
1957
1958 bp->port.pmf = 0;
1959 load_error1:
1960 bnx2x_napi_disable(bp);
1961 load_error0:
1962 bnx2x_free_mem(bp);
1963
1964 return rc;
1965 #endif /* ! BNX2X_STOP_ON_ERROR */
1966 }
1967
1968 /* must be called with rtnl_lock */
1969 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1970 {
1971 int i;
1972 bool global = false;
1973
1974 if ((bp->state == BNX2X_STATE_CLOSED) ||
1975 (bp->state == BNX2X_STATE_ERROR)) {
1976 /* We can get here if the driver has been unloaded
1977 * during parity error recovery and is either waiting for a
1978 * leader to complete or for other functions to unload and
1979 * then ifdown has been issued. In this case we want to
1980 * unload and let other functions complete the recovery
1981 * process.
1982 */
1983 bp->recovery_state = BNX2X_RECOVERY_DONE;
1984 bp->is_leader = 0;
1985 bnx2x_release_leader_lock(bp);
1986 smp_mb();
1987
1988 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
1989
1990 return -EINVAL;
1991 }
1992
1993 /*
1994 * It's important to set bp->state to a value different from
1995 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
1996 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
1997 */
1998 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1999 smp_mb();
2000
2001 /* Stop Tx */
2002 bnx2x_tx_disable(bp);
2003
2004 #ifdef BCM_CNIC
2005 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2006 #endif
2007
2008 bp->rx_mode = BNX2X_RX_MODE_NONE;
2009
2010 del_timer_sync(&bp->timer);
2011
2012 /* Set ALWAYS_ALIVE bit in shmem */
2013 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2014
2015 bnx2x_drv_pulse(bp);
2016
2017 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2018
2019 /* Cleanup the chip if needed */
2020 if (unload_mode != UNLOAD_RECOVERY)
2021 bnx2x_chip_cleanup(bp, unload_mode);
2022 else {
2023 /* Send the UNLOAD_REQUEST to the MCP */
2024 bnx2x_send_unload_req(bp, unload_mode);
2025
2026 /*
2027 * Prevent transactions to the host from the functions on the
2028 * engine that doesn't reset global blocks in case of a global
2029 * attention once the global blocks are reset and the gates are opened
2030 * (the engine whose leader will perform the recovery
2031 * last).
2032 */
2033 if (!CHIP_IS_E1x(bp))
2034 bnx2x_pf_disable(bp);
2035
2036 /* Disable HW interrupts, NAPI */
2037 bnx2x_netif_stop(bp, 1);
2038
2039 /* Release IRQs */
2040 bnx2x_free_irq(bp);
2041
2042 /* Report UNLOAD_DONE to MCP */
2043 bnx2x_send_unload_done(bp);
2044 }
2045
2046 /*
2047 * At this stage no more interrupts will arrive, so we may safely clean
2048 * the queueable objects here in case they failed to get cleaned so far.
2049 */
2050 bnx2x_squeeze_objects(bp);
2051
2052 /* There should be no more pending SP commands at this stage */
2053 bp->sp_state = 0;
2054
2055 bp->port.pmf = 0;
2056
2057 /* Free SKBs, SGEs, TPA pool and driver internals */
2058 bnx2x_free_skbs(bp);
2059 for_each_rx_queue(bp, i)
2060 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2061
2062 bnx2x_free_mem(bp);
2063
2064 bp->state = BNX2X_STATE_CLOSED;
2065
2066 /* Check if there are pending parity attentions. If there are - set
2067 * RECOVERY_IN_PROGRESS.
2068 */
2069 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2070 bnx2x_set_reset_in_progress(bp);
2071
2072 /* Set RESET_IS_GLOBAL if needed */
2073 if (global)
2074 bnx2x_set_reset_global(bp);
2075 }
2076
2077
2078 /* The last driver must disable the "close the gate" functionality if there is no
2079 * parity attention or "process kill" pending.
2080 */
2081 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2082 bnx2x_disable_close_the_gate(bp);
2083
2084 return 0;
2085 }
2086
2087 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2088 {
2089 u16 pmcsr;
2090
2091 /* If there is no power capability, silently succeed */
2092 if (!bp->pm_cap) {
2093 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2094 return 0;
2095 }
2096
2097 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2098
2099 switch (state) {
2100 case PCI_D0:
2101 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2102 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2103 PCI_PM_CTRL_PME_STATUS));
2104
2105 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2106 /* delay required during transition out of D3hot */
2107 msleep(20);
2108 break;
2109
2110 case PCI_D3hot:
2111 /* If there are other clients above, don't
2112 shut down the power */
2113 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2114 return 0;
2115 /* Don't shut down the power for emulation and FPGA */
2116 if (CHIP_REV_IS_SLOW(bp))
2117 return 0;
2118
2119 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2120 pmcsr |= 3;
2121
2122 if (bp->wol)
2123 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2124
2125 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2126 pmcsr);
2127
2128 /* No more memory access after this point until
2129 * device is brought back to D0.
2130 */
2131 break;
2132
2133 default:
2134 return -EINVAL;
2135 }
2136 return 0;
2137 }
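
/* Illustrative user-space sketch, not part of this driver: the D0/D3hot
 * transitions above boil down to read-modify-write operations on the 16-bit
 * PMCSR register. The mask values below mirror what <linux/pci_regs.h>
 * provides but are hard-coded here (as assumptions) so the example stands
 * alone.
 */
#include <stdint.h>
#include <stdio.h>

#define PM_CTRL_STATE_MASK	0x0003	/* D-state field, bits 1:0 */
#define PM_CTRL_PME_ENABLE	0x0100

/* Emulate the D3hot path: clear the state field, set D3hot (3),
 * and optionally arm PME when WoL is wanted. */
static uint16_t pmcsr_to_d3hot(uint16_t pmcsr, int wol)
{
	pmcsr &= ~PM_CTRL_STATE_MASK;
	pmcsr |= 3;
	if (wol)
		pmcsr |= PM_CTRL_PME_ENABLE;
	return pmcsr;
}

int main(void)
{
	uint16_t pmcsr = 0x0000;	/* device currently in D0 */

	printf("D3hot, no WoL: 0x%04x\n", pmcsr_to_d3hot(pmcsr, 0));
	printf("D3hot, WoL on: 0x%04x\n", pmcsr_to_d3hot(pmcsr, 1));
	return 0;
}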
2138
2139 /*
2140 * net_device service functions
2141 */
2142 int bnx2x_poll(struct napi_struct *napi, int budget)
2143 {
2144 int work_done = 0;
2145 u8 cos;
2146 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2147 napi);
2148 struct bnx2x *bp = fp->bp;
2149
2150 while (1) {
2151 #ifdef BNX2X_STOP_ON_ERROR
2152 if (unlikely(bp->panic)) {
2153 napi_complete(napi);
2154 return 0;
2155 }
2156 #endif
2157
2158 for_each_cos_in_tx_queue(fp, cos)
2159 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2160 bnx2x_tx_int(bp, &fp->txdata[cos]);
2161
2162
2163 if (bnx2x_has_rx_work(fp)) {
2164 work_done += bnx2x_rx_int(fp, budget - work_done);
2165
2166 /* must not complete if we consumed full budget */
2167 if (work_done >= budget)
2168 break;
2169 }
2170
2171 /* Fall out from the NAPI loop if needed */
2172 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2173 #ifdef BCM_CNIC
2174 /* No need to update SB for FCoE L2 ring as long as
2175 * it's connected to the default SB and the SB
2176 * has been updated when NAPI was scheduled.
2177 */
2178 if (IS_FCOE_FP(fp)) {
2179 napi_complete(napi);
2180 break;
2181 }
2182 #endif
2183
2184 bnx2x_update_fpsb_idx(fp);
2185 /* bnx2x_has_rx_work() reads the status block,
2186 * thus we need to ensure that status block indices
2187 * have been actually read (bnx2x_update_fpsb_idx)
2188 * prior to this check (bnx2x_has_rx_work) so that
2189 * we won't write the "newer" value of the status block
2190 * to IGU (if there was a DMA right after
2191 * bnx2x_has_rx_work and if there is no rmb, the memory
2192 * reading (bnx2x_update_fpsb_idx) may be postponed
2193 * to right before bnx2x_ack_sb). In this case there
2194 * will never be another interrupt until there is
2195 * another update of the status block, while there
2196 * is still unhandled work.
2197 */
2198 rmb();
2199
2200 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2201 napi_complete(napi);
2202 /* Re-enable interrupts */
2203 DP(NETIF_MSG_HW,
2204 "Update index to %d\n", fp->fp_hc_idx);
2205 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2206 le16_to_cpu(fp->fp_hc_idx),
2207 IGU_INT_ENABLE, 1);
2208 break;
2209 }
2210 }
2211 }
2212
2213 return work_done;
2214 }
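
/* Illustrative user-space sketch, not driver code: the budget handling above
 * follows the usual NAPI contract - consume at most 'budget' packets, report
 * how much work was done, and only complete (re-enabling interrupts) when
 * the ring drains before the budget runs out. The toy poller below models
 * only that accounting; 'pending' stands in for the RX ring.
 */
#include <stdio.h>

static int pending = 173;	/* packets waiting on the ring */

/* Process at most 'budget' packets and return the amount of work done. */
static int toy_poll(int budget)
{
	int work_done = pending < budget ? pending : budget;

	pending -= work_done;
	return work_done;
}

int main(void)
{
	int budget = 64, rounds = 0, done;

	do {
		done = toy_poll(budget);
		rounds++;
		printf("round %d: work_done=%d pending=%d\n",
		       rounds, done, pending);
	} while (done == budget);	/* full budget used -> poll again */

	printf("drained after %d rounds; interrupts would be re-enabled\n",
	       rounds);
	return 0;
}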
2215
2216 /* We split the first BD into header and data BDs
2217 * to ease the pain of our fellow microcode engineers;
2218 * we use one mapping for both BDs.
2219 * So far this has only been observed to happen
2220 * in Other Operating Systems(TM).
2221 */
2222 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2223 struct bnx2x_fp_txdata *txdata,
2224 struct sw_tx_bd *tx_buf,
2225 struct eth_tx_start_bd **tx_bd, u16 hlen,
2226 u16 bd_prod, int nbd)
2227 {
2228 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2229 struct eth_tx_bd *d_tx_bd;
2230 dma_addr_t mapping;
2231 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2232
2233 /* first fix first BD */
2234 h_tx_bd->nbd = cpu_to_le16(nbd);
2235 h_tx_bd->nbytes = cpu_to_le16(hlen);
2236
2237 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2238 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2239 h_tx_bd->addr_lo, h_tx_bd->nbd);
2240
2241 /* now get a new data BD
2242 * (after the pbd) and fill it */
2243 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2244 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2245
2246 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2247 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2248
2249 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2250 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2251 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2252
2253 /* this marks the BD as one that has no individual mapping */
2254 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2255
2256 DP(NETIF_MSG_TX_QUEUED,
2257 "TSO split data size is %d (%x:%x)\n",
2258 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2259
2260 /* update tx_bd */
2261 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2262
2263 return bd_prod;
2264 }
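
/* Illustrative sketch, not driver code: the split above reuses the header
 * BD's DMA mapping for the new data BD by recombining the hi/lo 32-bit
 * halves into one 64-bit address, adding hlen, and splitting it again. The
 * helpers below redo the HILO_U64/U64_HI/U64_LO arithmetic with plain
 * stdint types.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

static uint32_t u64_hi(uint64_t x) { return (uint32_t)(x >> 32); }
static uint32_t u64_lo(uint64_t x) { return (uint32_t)(x & 0xffffffff); }

int main(void)
{
	uint32_t addr_hi = 0x00000001, addr_lo = 0xfffff000;
	uint16_t hlen = 66;	/* e.g. ETH + IP + TCP headers */

	/* the data BD points 'hlen' bytes into the same mapping */
	uint64_t data = hilo_u64(addr_hi, addr_lo) + hlen;

	printf("data BD addr: hi=0x%08x lo=0x%08x\n",
	       u64_hi(data), u64_lo(data));
	return 0;
}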
2265
2266 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2267 {
2268 if (fix > 0)
2269 csum = (u16) ~csum_fold(csum_sub(csum,
2270 csum_partial(t_header - fix, fix, 0)));
2271
2272 else if (fix < 0)
2273 csum = (u16) ~csum_fold(csum_add(csum,
2274 csum_partial(t_header, -fix, 0)));
2275
2276 return swab16(csum);
2277 }
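
/* Illustrative sketch, not driver code: bnx2x_csum_fix() removes (or adds)
 * the contribution of the bytes between the point where the HW started
 * summing and the transport header, using one's-complement arithmetic. The
 * standalone version below shows the "remove a prefix" case with a local
 * csum16() helper (even lengths only, for brevity).
 */
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum over a buffer of even length */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Drop the first 'fix' bytes from a running sum: adding the one's
 * complement of their checksum is the same idea as csum_sub()/csum_fold(). */
static uint16_t csum_drop_prefix(uint16_t sum, const uint8_t *buf, size_t fix)
{
	uint32_t adj = sum + (uint16_t)~csum16(buf, fix);
	while (adj >> 16)
		adj = (adj & 0xffff) + (adj >> 16);
	return (uint16_t)adj;
}

int main(void)
{
	uint8_t pkt[32];
	for (int i = 0; i < 32; i++)
		pkt[i] = (uint8_t)(i * 7 + 3);

	uint16_t whole = csum16(pkt, 32);
	uint16_t fixed = csum_drop_prefix(whole, pkt, 8);
	uint16_t direct = csum16(pkt + 8, 24);

	printf("adjusted=0x%04x direct=0x%04x (should match)\n", fixed, direct);
	return 0;
}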
2278
2279 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2280 {
2281 u32 rc;
2282
2283 if (skb->ip_summed != CHECKSUM_PARTIAL)
2284 rc = XMIT_PLAIN;
2285
2286 else {
2287 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2288 rc = XMIT_CSUM_V6;
2289 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2290 rc |= XMIT_CSUM_TCP;
2291
2292 } else {
2293 rc = XMIT_CSUM_V4;
2294 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2295 rc |= XMIT_CSUM_TCP;
2296 }
2297 }
2298
2299 if (skb_is_gso_v6(skb))
2300 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2301 else if (skb_is_gso(skb))
2302 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2303
2304 return rc;
2305 }
2306
2307 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2308 /* check if packet requires linearization (packet is too fragmented)
2309 no need to check fragmentation if page size > 8K (there will be no
2310 violation to FW restrictions) */
2311 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2312 u32 xmit_type)
2313 {
2314 int to_copy = 0;
2315 int hlen = 0;
2316 int first_bd_sz = 0;
2317
2318 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2319 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2320
2321 if (xmit_type & XMIT_GSO) {
2322 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2323 /* Check if LSO packet needs to be copied:
2324 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2325 int wnd_size = MAX_FETCH_BD - 3;
2326 /* Number of windows to check */
2327 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2328 int wnd_idx = 0;
2329 int frag_idx = 0;
2330 u32 wnd_sum = 0;
2331
2332 /* Headers length */
2333 hlen = (int)(skb_transport_header(skb) - skb->data) +
2334 tcp_hdrlen(skb);
2335
2336 /* Amount of data (w/o headers) on linear part of SKB */
2337 first_bd_sz = skb_headlen(skb) - hlen;
2338
2339 wnd_sum = first_bd_sz;
2340
2341 /* Calculate the first sum - it's special */
2342 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2343 wnd_sum +=
2344 skb_shinfo(skb)->frags[frag_idx].size;
2345
2346 /* If there was data in the linear part of the skb - check it */
2347 if (first_bd_sz > 0) {
2348 if (unlikely(wnd_sum < lso_mss)) {
2349 to_copy = 1;
2350 goto exit_lbl;
2351 }
2352
2353 wnd_sum -= first_bd_sz;
2354 }
2355
2356 /* Others are easier: run through the frag list and
2357 check all windows */
2358 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2359 wnd_sum +=
2360 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2361
2362 if (unlikely(wnd_sum < lso_mss)) {
2363 to_copy = 1;
2364 break;
2365 }
2366 wnd_sum -=
2367 skb_shinfo(skb)->frags[wnd_idx].size;
2368 }
2369 } else {
2370 /* in the non-LSO case, a too-fragmented packet
2371 should always be linearized */
2372 to_copy = 1;
2373 }
2374 }
2375
2376 exit_lbl:
2377 if (unlikely(to_copy))
2378 DP(NETIF_MSG_TX_QUEUED,
2379 "Linearization IS REQUIRED for %s packet. "
2380 "num_frags %d hlen %d first_bd_sz %d\n",
2381 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2382 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2383
2384 return to_copy;
2385 }
2386 #endif
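
/* Illustrative sketch, not driver code: the check above enforces the FW rule
 * that every window of (MAX_FETCH_BD - 3) consecutive BDs must carry at
 * least one MSS worth of payload, otherwise the skb is linearized. The
 * simplified version below treats the linear part as frag[0] and runs the
 * same sliding-window sum; wnd_size and mss are made-up example values.
 */
#include <stdio.h>

/* Return 1 if any window of 'wnd_size' consecutive fragments sums to less
 * than 'mss', i.e. the packet would need linearization. */
static int needs_linearization(const int *frag, int nfrags,
			       int wnd_size, int mss)
{
	int wnd_sum = 0;

	if (nfrags < wnd_size)
		return 0;	/* few enough BDs, the FW limit can't be hit */

	for (int i = 0; i < wnd_size; i++)
		wnd_sum += frag[i];

	for (int i = 0; ; i++) {
		if (wnd_sum < mss)
			return 1;
		if (i + wnd_size >= nfrags)
			break;
		wnd_sum += frag[i + wnd_size] - frag[i];	/* slide */
	}
	return 0;
}

int main(void)
{
	int frags[] = { 1460, 1460, 40, 40, 40, 40, 40,
			40, 40, 40, 40, 40, 40 };
	int n = sizeof(frags) / sizeof(frags[0]);

	printf("needs linearization: %d\n",
	       needs_linearization(frags, n, 10, 1400));
	return 0;
}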
2387
2388 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2389 u32 xmit_type)
2390 {
2391 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2392 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2393 ETH_TX_PARSE_BD_E2_LSO_MSS;
2394 if ((xmit_type & XMIT_GSO_V6) &&
2395 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2396 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2397 }
2398
2399 /**
2400 * bnx2x_set_pbd_gso - update PBD in GSO case.
2401 *
2402 * @skb: packet skb
2403 * @pbd: parse BD
2404 * @xmit_type: xmit flags
2405 */
2406 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2407 struct eth_tx_parse_bd_e1x *pbd,
2408 u32 xmit_type)
2409 {
2410 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2411 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2412 pbd->tcp_flags = pbd_tcp_flags(skb);
2413
2414 if (xmit_type & XMIT_GSO_V4) {
2415 pbd->ip_id = swab16(ip_hdr(skb)->id);
2416 pbd->tcp_pseudo_csum =
2417 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2418 ip_hdr(skb)->daddr,
2419 0, IPPROTO_TCP, 0));
2420
2421 } else
2422 pbd->tcp_pseudo_csum =
2423 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2424 &ipv6_hdr(skb)->daddr,
2425 0, IPPROTO_TCP, 0));
2426
2427 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2428 }
2429
2430 /**
2431 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2432 *
2433 * @bp: driver handle
2434 * @skb: packet skb
2435 * @parsing_data: data to be updated
2436 * @xmit_type: xmit flags
2437 *
2438 * 57712 related
2439 */
2440 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2441 u32 *parsing_data, u32 xmit_type)
2442 {
2443 *parsing_data |=
2444 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2445 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2446 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2447
2448 if (xmit_type & XMIT_CSUM_TCP) {
2449 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2450 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2451 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2452
2453 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2454 } else
2455 /* We support checksum offload for TCP and UDP only.
2456 * No need to pass the UDP header length - it's a constant.
2457 */
2458 return skb_transport_header(skb) +
2459 sizeof(struct udphdr) - skb->data;
2460 }
2461
2462 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2463 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2464 {
2465 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2466
2467 if (xmit_type & XMIT_CSUM_V4)
2468 tx_start_bd->bd_flags.as_bitfield |=
2469 ETH_TX_BD_FLAGS_IP_CSUM;
2470 else
2471 tx_start_bd->bd_flags.as_bitfield |=
2472 ETH_TX_BD_FLAGS_IPV6;
2473
2474 if (!(xmit_type & XMIT_CSUM_TCP))
2475 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2476 }
2477
2478 /**
2479 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2480 *
2481 * @bp: driver handle
2482 * @skb: packet skb
2483 * @pbd: parse BD to be updated
2484 * @xmit_type: xmit flags
2485 */
2486 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2487 struct eth_tx_parse_bd_e1x *pbd,
2488 u32 xmit_type)
2489 {
2490 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2491
2492 /* for now NS flag is not used in Linux */
2493 pbd->global_data =
2494 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2495 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2496
2497 pbd->ip_hlen_w = (skb_transport_header(skb) -
2498 skb_network_header(skb)) >> 1;
2499
2500 hlen += pbd->ip_hlen_w;
2501
2502 /* We support checksum offload for TCP and UDP only */
2503 if (xmit_type & XMIT_CSUM_TCP)
2504 hlen += tcp_hdrlen(skb) / 2;
2505 else
2506 hlen += sizeof(struct udphdr) / 2;
2507
2508 pbd->total_hlen_w = cpu_to_le16(hlen);
2509 hlen = hlen*2;
2510
2511 if (xmit_type & XMIT_CSUM_TCP) {
2512 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2513
2514 } else {
2515 s8 fix = SKB_CS_OFF(skb); /* signed! */
2516
2517 DP(NETIF_MSG_TX_QUEUED,
2518 "hlen %d fix %d csum before fix %x\n",
2519 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2520
2521 /* HW bug: fixup the CSUM */
2522 pbd->tcp_pseudo_csum =
2523 bnx2x_csum_fix(skb_transport_header(skb),
2524 SKB_CS(skb), fix);
2525
2526 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2527 pbd->tcp_pseudo_csum);
2528 }
2529
2530 return hlen;
2531 }
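
/* Illustrative sketch, not driver code: the parse BD stores header lengths
 * in 16-bit words, which is why the code above shifts byte offsets right by
 * one and doubles the total again before returning. A worked example with
 * typical (assumed) header sizes:
 */
#include <stdio.h>

int main(void)
{
	int eth_hlen = 14, ip_hlen = 20, tcp_hlen = 20;	/* bytes */

	int hlen_w = eth_hlen / 2;		/* up to the IP header, in words */
	int ip_hlen_w = ip_hlen / 2;		/* like pbd->ip_hlen_w */

	hlen_w += ip_hlen_w + tcp_hlen / 2;	/* like pbd->total_hlen_w */

	printf("total_hlen_w = %d words = %d bytes\n", hlen_w, hlen_w * 2);
	return 0;
}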
2532
2533 /* called with netif_tx_lock
2534 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2535 * netif_wake_queue()
2536 */
2537 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2538 {
2539 struct bnx2x *bp = netdev_priv(dev);
2540
2541 struct bnx2x_fastpath *fp;
2542 struct netdev_queue *txq;
2543 struct bnx2x_fp_txdata *txdata;
2544 struct sw_tx_bd *tx_buf;
2545 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2546 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2547 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2548 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2549 u32 pbd_e2_parsing_data = 0;
2550 u16 pkt_prod, bd_prod;
2551 int nbd, txq_index, fp_index, txdata_index;
2552 dma_addr_t mapping;
2553 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2554 int i;
2555 u8 hlen = 0;
2556 __le16 pkt_size = 0;
2557 struct ethhdr *eth;
2558 u8 mac_type = UNICAST_ADDRESS;
2559
2560 #ifdef BNX2X_STOP_ON_ERROR
2561 if (unlikely(bp->panic))
2562 return NETDEV_TX_BUSY;
2563 #endif
2564
2565 txq_index = skb_get_queue_mapping(skb);
2566 txq = netdev_get_tx_queue(dev, txq_index);
2567
2568 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2569
2570 /* decode the fastpath index and the cos index from the txq */
2571 fp_index = TXQ_TO_FP(txq_index);
2572 txdata_index = TXQ_TO_COS(txq_index);
2573
2574 #ifdef BCM_CNIC
2575 /*
2576 * Override the above for the FCoE queue:
2577 * - FCoE fp entry is right after the ETH entries.
2578 * - FCoE L2 queue uses bp->txdata[0] only.
2579 */
2580 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2581 bnx2x_fcoe_tx(bp, txq_index)))) {
2582 fp_index = FCOE_IDX;
2583 txdata_index = 0;
2584 }
2585 #endif
2586
2587 /* enable this debug print to view the transmission queue being used
2588 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
2589 txq_index, fp_index, txdata_index); */
2590
2591 /* locate the fastpath and the txdata */
2592 fp = &bp->fp[fp_index];
2593 txdata = &fp->txdata[txdata_index];
2594
2595 /* enable this debug print to view the transmission details
2596 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2597 " tx_data ptr %p fp pointer %p",
2598 txdata->cid, fp_index, txdata_index, txdata, fp); */
2599
2600 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2601 (skb_shinfo(skb)->nr_frags + 3))) {
2602 fp->eth_q_stats.driver_xoff++;
2603 netif_tx_stop_queue(txq);
2604 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2605 return NETDEV_TX_BUSY;
2606 }
2607
2608 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2609 "protocol(%x,%x) gso type %x xmit_type %x\n",
2610 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2611 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2612
2613 eth = (struct ethhdr *)skb->data;
2614
2615 /* set flag according to packet type (UNICAST_ADDRESS is default) */
2616 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2617 if (is_broadcast_ether_addr(eth->h_dest))
2618 mac_type = BROADCAST_ADDRESS;
2619 else
2620 mac_type = MULTICAST_ADDRESS;
2621 }
2622
2623 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2624 /* First, check if we need to linearize the skb (due to FW
2625 restrictions). No need to check fragmentation if page size > 8K
2626 (there will be no violation to FW restrictions) */
2627 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2628 /* Statistics of linearization */
2629 bp->lin_cnt++;
2630 if (skb_linearize(skb) != 0) {
2631 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2632 "silently dropping this SKB\n");
2633 dev_kfree_skb_any(skb);
2634 return NETDEV_TX_OK;
2635 }
2636 }
2637 #endif
2638 /* Map skb linear data for DMA */
2639 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2640 skb_headlen(skb), DMA_TO_DEVICE);
2641 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2642 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2643 "silently dropping this SKB\n");
2644 dev_kfree_skb_any(skb);
2645 return NETDEV_TX_OK;
2646 }
2647 /*
2648 Please read carefully. First we use one BD which we mark as start,
2649 then we have a parsing info BD (used for TSO or xsum),
2650 and only then we have the rest of the TSO BDs.
2651 (don't forget to mark the last one as last,
2652 and to unmap only AFTER you write to the BD ...)
2653 And above all, all PBD sizes are in words - NOT DWORDS!
2654 */
2655
2656 /* get the current pkt producer now - advance it just before sending the packet,
2657 * since mapping of pages may fail and cause the packet to be dropped
2658 */
2659 pkt_prod = txdata->tx_pkt_prod;
2660 bd_prod = TX_BD(txdata->tx_bd_prod);
2661
2662 /* get a tx_buf and first BD
2663 * tx_start_bd may be changed during SPLIT,
2664 * but first_bd will always stay first
2665 */
2666 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2667 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2668 first_bd = tx_start_bd;
2669
2670 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2671 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2672 mac_type);
2673
2674 /* header nbd */
2675 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2676
2677 /* remember the first BD of the packet */
2678 tx_buf->first_bd = txdata->tx_bd_prod;
2679 tx_buf->skb = skb;
2680 tx_buf->flags = 0;
2681
2682 DP(NETIF_MSG_TX_QUEUED,
2683 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2684 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2685
2686 if (vlan_tx_tag_present(skb)) {
2687 tx_start_bd->vlan_or_ethertype =
2688 cpu_to_le16(vlan_tx_tag_get(skb));
2689 tx_start_bd->bd_flags.as_bitfield |=
2690 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2691 } else
2692 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2693
2694 /* turn on parsing and get a BD */
2695 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2696
2697 if (xmit_type & XMIT_CSUM)
2698 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2699
2700 if (!CHIP_IS_E1x(bp)) {
2701 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2702 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2703 /* Set PBD in checksum offload case */
2704 if (xmit_type & XMIT_CSUM)
2705 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2706 &pbd_e2_parsing_data,
2707 xmit_type);
2708 if (IS_MF_SI(bp)) {
2709 /*
2710 * fill in the MAC addresses in the PBD - for local
2711 * switching
2712 */
2713 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2714 &pbd_e2->src_mac_addr_mid,
2715 &pbd_e2->src_mac_addr_lo,
2716 eth->h_source);
2717 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2718 &pbd_e2->dst_mac_addr_mid,
2719 &pbd_e2->dst_mac_addr_lo,
2720 eth->h_dest);
2721 }
2722 } else {
2723 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2724 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2725 /* Set PBD in checksum offload case */
2726 if (xmit_type & XMIT_CSUM)
2727 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2728
2729 }
2730
2731 /* Setup the data pointer of the first BD of the packet */
2732 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2733 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2734 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2735 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2736 pkt_size = tx_start_bd->nbytes;
2737
2738 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2739 " nbytes %d flags %x vlan %x\n",
2740 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2741 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2742 tx_start_bd->bd_flags.as_bitfield,
2743 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2744
2745 if (xmit_type & XMIT_GSO) {
2746
2747 DP(NETIF_MSG_TX_QUEUED,
2748 "TSO packet len %d hlen %d total len %d tso size %d\n",
2749 skb->len, hlen, skb_headlen(skb),
2750 skb_shinfo(skb)->gso_size);
2751
2752 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2753
2754 if (unlikely(skb_headlen(skb) > hlen))
2755 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2756 &tx_start_bd, hlen,
2757 bd_prod, ++nbd);
2758 if (!CHIP_IS_E1x(bp))
2759 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2760 xmit_type);
2761 else
2762 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2763 }
2764
2765 /* Set the PBD's parsing_data field if not zero
2766 * (for the chips newer than 57711).
2767 */
2768 if (pbd_e2_parsing_data)
2769 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2770
2771 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2772
2773 /* Handle fragmented skb */
2774 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2775 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2776
2777 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2778 frag->page_offset, frag->size,
2779 DMA_TO_DEVICE);
2780 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2781
2782 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2783 "dropping packet...\n");
2784
2785 /* we need to unmap all buffers already mapped
2786 * for this SKB;
2787 * first_bd->nbd needs to be properly updated
2788 * before the call to bnx2x_free_tx_pkt
2789 */
2790 first_bd->nbd = cpu_to_le16(nbd);
2791 bnx2x_free_tx_pkt(bp, txdata,
2792 TX_BD(txdata->tx_pkt_prod));
2793 return NETDEV_TX_OK;
2794 }
2795
2796 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2797 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2798 if (total_pkt_bd == NULL)
2799 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2800
2801 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2802 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2803 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2804 le16_add_cpu(&pkt_size, frag->size);
2805 nbd++;
2806
2807 DP(NETIF_MSG_TX_QUEUED,
2808 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2809 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2810 le16_to_cpu(tx_data_bd->nbytes));
2811 }
2812
2813 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2814
2815 /* update with actual num BDs */
2816 first_bd->nbd = cpu_to_le16(nbd);
2817
2818 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2819
2820 /* now send a tx doorbell, counting the next BD
2821 * if the packet contains or ends with it
2822 */
2823 if (TX_BD_POFF(bd_prod) < nbd)
2824 nbd++;
2825
2826 /* total_pkt_bytes should be set on the first data BD if
2827 * it's not an LSO packet and there is more than one
2828 * data BD. In this case pkt_size is limited by an MTU value.
2829 * However we prefer to set it for an LSO packet (while we don't
2830 * have to) in order to save some CPU cycles in the non-LSO
2831 * case, where we care about them much more.
2832 */
2833 if (total_pkt_bd != NULL)
2834 total_pkt_bd->total_pkt_bytes = pkt_size;
2835
2836 if (pbd_e1x)
2837 DP(NETIF_MSG_TX_QUEUED,
2838 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2839 " tcp_flags %x xsum %x seq %u hlen %u\n",
2840 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2841 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2842 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2843 le16_to_cpu(pbd_e1x->total_hlen_w));
2844 if (pbd_e2)
2845 DP(NETIF_MSG_TX_QUEUED,
2846 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2847 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2848 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2849 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2850 pbd_e2->parsing_data);
2851 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2852
2853 txdata->tx_pkt_prod++;
2854 /*
2855 * Make sure that the BD data is updated before updating the producer
2856 * since FW might read the BD right after the producer is updated.
2857 * This is only applicable for weak-ordered memory model archs such
2858 * as IA-64. The following barrier is also mandatory since the FW
2859 * assumes packets always have BDs.
2860 */
2861 wmb();
2862
2863 txdata->tx_db.data.prod += nbd;
2864 barrier();
2865
2866 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
2867
2868 mmiowb();
2869
2870 txdata->tx_bd_prod += nbd;
2871
2872 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
2873 netif_tx_stop_queue(txq);
2874
2875 /* The paired memory barrier is in bnx2x_tx_int(); we have to keep
2876 * the ordering of set_bit() in netif_tx_stop_queue() and the read
2877 * of fp->bd_tx_cons */
2878 smp_mb();
2879
2880 fp->eth_q_stats.driver_xoff++;
2881 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
2882 netif_tx_wake_queue(txq);
2883 }
2884 txdata->tx_pkt++;
2885
2886 return NETDEV_TX_OK;
2887 }
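
/* Illustrative sketch, not driver code: the BD accounting above starts nbd
 * at 2 (start BD + parse BD), adds one data BD per page fragment, and one
 * more when the TSO headers are split away from the payload. The helper
 * below reproduces just that bookkeeping.
 */
#include <stdio.h>

static int pkt_nbd(int nr_frags, int tso_split)
{
	int nbd = 2;		/* start_bd + pbd */

	nbd += nr_frags;	/* one reg_bd per page fragment */
	if (tso_split)
		nbd++;		/* extra data BD created by bnx2x_tx_split() */
	return nbd;
}

int main(void)
{
	printf("plain packet, 1 frag: %d BDs\n", pkt_nbd(1, 0));
	printf("TSO, 16 frags, split headers: %d BDs\n", pkt_nbd(16, 1));
	return 0;
}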
2888
2889 /**
2890 * bnx2x_setup_tc - routine to configure net_device for multi tc
2891 *
2892 * @netdev: net device to configure
2893 * @tc: number of traffic classes to enable
2894 *
2895 * callback connected to the ndo_setup_tc function pointer
2896 */
2897 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2898 {
2899 int cos, prio, count, offset;
2900 struct bnx2x *bp = netdev_priv(dev);
2901
2902 /* setup tc must be called under rtnl lock */
2903 ASSERT_RTNL();
2904
2905 /* no traffic classes requested. aborting */
2906 if (!num_tc) {
2907 netdev_reset_tc(dev);
2908 return 0;
2909 }
2910
2911 /* requested to support too many traffic classes */
2912 if (num_tc > bp->max_cos) {
2913 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2914 " requested: %d. max supported is %d",
2915 num_tc, bp->max_cos);
2916 return -EINVAL;
2917 }
2918
2919 /* declare amount of supported traffic classes */
2920 if (netdev_set_num_tc(dev, num_tc)) {
2921 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
2922 num_tc);
2923 return -EINVAL;
2924 }
2925
2926 /* configure priority to traffic class mapping */
2927 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2928 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2929 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
2930 prio, bp->prio_to_cos[prio]);
2931 }
2932
2933
2934 /* Use this configuration to differentiate tc0 from other COSes.
2935 This can be used for ETS or PFC, and saves the effort of setting
2936 up a multi-class queue disc or negotiating DCBX with a switch:
2937 netdev_set_prio_tc_map(dev, 0, 0);
2938 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
2939 for (prio = 1; prio < 16; prio++) {
2940 netdev_set_prio_tc_map(dev, prio, 1);
2941 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
2942 } */
2943
2944 /* configure traffic class to transmission queue mapping */
2945 for (cos = 0; cos < bp->max_cos; cos++) {
2946 count = BNX2X_NUM_ETH_QUEUES(bp);
2947 offset = cos * MAX_TXQS_PER_COS;
2948 netdev_set_tc_queue(dev, cos, count, offset);
2949 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
2950 cos, offset, count);
2951 }
2952
2953 return 0;
2954 }
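
/* Illustrative sketch, not driver code: each traffic class is backed by a
 * contiguous block of transmit queues starting at cos * MAX_TXQS_PER_COS and
 * spanning one queue per ETH RSS queue. The constants below are made-up
 * example values, not the driver's.
 */
#include <stdio.h>

int main(void)
{
	int max_cos = 3, num_eth_queues = 4, max_txqs_per_cos = 16;

	for (int cos = 0; cos < max_cos; cos++) {
		int count = num_eth_queues;		/* queues in this tc */
		int offset = cos * max_txqs_per_cos;	/* first queue index */

		printf("tc %d -> txq [%d..%d]\n",
		       cos, offset, offset + count - 1);
	}
	return 0;
}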
2955
2956 /* called with rtnl_lock */
2957 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2958 {
2959 struct sockaddr *addr = p;
2960 struct bnx2x *bp = netdev_priv(dev);
2961 int rc = 0;
2962
2963 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2964 return -EINVAL;
2965
2966 if (netif_running(dev)) {
2967 rc = bnx2x_set_eth_mac(bp, false);
2968 if (rc)
2969 return rc;
2970 }
2971
2972 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2973
2974 if (netif_running(dev))
2975 rc = bnx2x_set_eth_mac(bp, true);
2976
2977 return rc;
2978 }
2979
2980 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2981 {
2982 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2983 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2984 u8 cos;
2985
2986 /* Common */
2987 #ifdef BCM_CNIC
2988 if (IS_FCOE_IDX(fp_index)) {
2989 memset(sb, 0, sizeof(union host_hc_status_block));
2990 fp->status_blk_mapping = 0;
2991
2992 } else {
2993 #endif
2994 /* status blocks */
2995 if (!CHIP_IS_E1x(bp))
2996 BNX2X_PCI_FREE(sb->e2_sb,
2997 bnx2x_fp(bp, fp_index,
2998 status_blk_mapping),
2999 sizeof(struct host_hc_status_block_e2));
3000 else
3001 BNX2X_PCI_FREE(sb->e1x_sb,
3002 bnx2x_fp(bp, fp_index,
3003 status_blk_mapping),
3004 sizeof(struct host_hc_status_block_e1x));
3005 #ifdef BCM_CNIC
3006 }
3007 #endif
3008 /* Rx */
3009 if (!skip_rx_queue(bp, fp_index)) {
3010 bnx2x_free_rx_bds(fp);
3011
3012 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3013 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3014 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3015 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3016 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3017
3018 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3019 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3020 sizeof(struct eth_fast_path_rx_cqe) *
3021 NUM_RCQ_BD);
3022
3023 /* SGE ring */
3024 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3025 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3026 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3027 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3028 }
3029
3030 /* Tx */
3031 if (!skip_tx_queue(bp, fp_index)) {
3032 /* fastpath tx rings: tx_buf tx_desc */
3033 for_each_cos_in_tx_queue(fp, cos) {
3034 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3035
3036 DP(BNX2X_MSG_SP,
3037 "freeing tx memory of fp %d cos %d cid %d",
3038 fp_index, cos, txdata->cid);
3039
3040 BNX2X_FREE(txdata->tx_buf_ring);
3041 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3042 txdata->tx_desc_mapping,
3043 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3044 }
3045 }
3046 /* end of fastpath */
3047 }
3048
3049 void bnx2x_free_fp_mem(struct bnx2x *bp)
3050 {
3051 int i;
3052 for_each_queue(bp, i)
3053 bnx2x_free_fp_mem_at(bp, i);
3054 }
3055
3056 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3057 {
3058 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3059 if (!CHIP_IS_E1x(bp)) {
3060 bnx2x_fp(bp, index, sb_index_values) =
3061 (__le16 *)status_blk.e2_sb->sb.index_values;
3062 bnx2x_fp(bp, index, sb_running_index) =
3063 (__le16 *)status_blk.e2_sb->sb.running_index;
3064 } else {
3065 bnx2x_fp(bp, index, sb_index_values) =
3066 (__le16 *)status_blk.e1x_sb->sb.index_values;
3067 bnx2x_fp(bp, index, sb_running_index) =
3068 (__le16 *)status_blk.e1x_sb->sb.running_index;
3069 }
3070 }
3071
3072 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3073 {
3074 union host_hc_status_block *sb;
3075 struct bnx2x_fastpath *fp = &bp->fp[index];
3076 int ring_size = 0;
3077 u8 cos;
3078
3079 /* if rx_ring_size specified - use it */
3080 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
3081 MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3082
3083 /* allocate at least the number of buffers required by the FW */
3084 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3085 MIN_RX_SIZE_TPA,
3086 rx_ring_size);
3087
3088 /* Common */
3089 sb = &bnx2x_fp(bp, index, status_blk);
3090 #ifdef BCM_CNIC
3091 if (!IS_FCOE_IDX(index)) {
3092 #endif
3093 /* status blocks */
3094 if (!CHIP_IS_E1x(bp))
3095 BNX2X_PCI_ALLOC(sb->e2_sb,
3096 &bnx2x_fp(bp, index, status_blk_mapping),
3097 sizeof(struct host_hc_status_block_e2));
3098 else
3099 BNX2X_PCI_ALLOC(sb->e1x_sb,
3100 &bnx2x_fp(bp, index, status_blk_mapping),
3101 sizeof(struct host_hc_status_block_e1x));
3102 #ifdef BCM_CNIC
3103 }
3104 #endif
3105
3106 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3107 * set shortcuts for it.
3108 */
3109 if (!IS_FCOE_IDX(index))
3110 set_sb_shortcuts(bp, index);
3111
3112 /* Tx */
3113 if (!skip_tx_queue(bp, index)) {
3114 /* fastpath tx rings: tx_buf tx_desc */
3115 for_each_cos_in_tx_queue(fp, cos) {
3116 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3117
3118 DP(BNX2X_MSG_SP, "allocating tx memory of "
3119 "fp %d cos %d",
3120 index, cos);
3121
3122 BNX2X_ALLOC(txdata->tx_buf_ring,
3123 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3124 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3125 &txdata->tx_desc_mapping,
3126 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3127 }
3128 }
3129
3130 /* Rx */
3131 if (!skip_rx_queue(bp, index)) {
3132 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3133 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3134 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3135 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3136 &bnx2x_fp(bp, index, rx_desc_mapping),
3137 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3138
3139 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3140 &bnx2x_fp(bp, index, rx_comp_mapping),
3141 sizeof(struct eth_fast_path_rx_cqe) *
3142 NUM_RCQ_BD);
3143
3144 /* SGE ring */
3145 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3146 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3147 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3148 &bnx2x_fp(bp, index, rx_sge_mapping),
3149 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3150 /* RX BD ring */
3151 bnx2x_set_next_page_rx_bd(fp);
3152
3153 /* CQ ring */
3154 bnx2x_set_next_page_rx_cq(fp);
3155
3156 /* BDs */
3157 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3158 if (ring_size < rx_ring_size)
3159 goto alloc_mem_err;
3160 }
3161
3162 return 0;
3163
3164 /* handles low memory cases */
3165 alloc_mem_err:
3166 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3167 index, ring_size);
3168 /* FW will drop all packets if the queue is not big enough;
3169 * in that case we disable the queue.
3170 * The minimum size is different for OOO, TPA and non-TPA queues.
3171 */
3172 if (ring_size < (fp->disable_tpa ?
3173 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3174 /* release memory allocated for this queue */
3175 bnx2x_free_fp_mem_at(bp, index);
3176 return -ENOMEM;
3177 }
3178 return 0;
3179 }
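
/* Illustrative sketch, not driver code: the RX ring size is either the
 * user-requested value or MAX_RX_AVAIL spread over the RX queues, and is
 * then clamped up to the FW minimum (which differs for TPA and non-TPA
 * queues). The numbers below are made up for the example.
 */
#include <stdio.h>

static int clamp_ring_size(int requested, int max_avail, int nqueues,
			   int fw_min)
{
	int size = requested ? requested : max_avail / nqueues;

	return size > fw_min ? size : fw_min;	/* like max_t(int, ...) */
}

int main(void)
{
	printf("%d\n", clamp_ring_size(0, 4096, 8, 128));	/* 512 */
	printf("%d\n", clamp_ring_size(64, 4096, 8, 128));	/* clamped to 128 */
	return 0;
}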
3180
3181 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3182 {
3183 int i;
3184
3185 /**
3186 * 1. Allocate FP for leading - fatal if error
3187 * 2. {CNIC} Allocate FCoE FP - fatal if error
3188 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3189 * 4. Allocate RSS - fix number of queues if error
3190 */
3191
3192 /* leading */
3193 if (bnx2x_alloc_fp_mem_at(bp, 0))
3194 return -ENOMEM;
3195
3196 #ifdef BCM_CNIC
3197 if (!NO_FCOE(bp))
3198 /* FCoE */
3199 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3200 /* we will fail the load process instead of marking
3201 * NO_FCOE_FLAG
3202 */
3203 return -ENOMEM;
3204 #endif
3205
3206 /* RSS */
3207 for_each_nondefault_eth_queue(bp, i)
3208 if (bnx2x_alloc_fp_mem_at(bp, i))
3209 break;
3210
3211 /* handle memory failures */
3212 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3213 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3214
3215 WARN_ON(delta < 0);
3216 #ifdef BCM_CNIC
3217 /**
3218 * move non-eth FPs next to the last eth FP;
3219 * this must be done in the following order:
3220 * FCOE_IDX < FWD_IDX < OOO_IDX
3221 */
3222
3223 /* move the FCoE fp even if NO_FCOE_FLAG is on */
3224 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3225 #endif
3226 bp->num_queues -= delta;
3227 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3228 bp->num_queues + delta, bp->num_queues);
3229 }
3230
3231 return 0;
3232 }
3233
3234 void bnx2x_free_mem_bp(struct bnx2x *bp)
3235 {
3236 kfree(bp->fp);
3237 kfree(bp->msix_table);
3238 kfree(bp->ilt);
3239 }
3240
3241 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3242 {
3243 struct bnx2x_fastpath *fp;
3244 struct msix_entry *tbl;
3245 struct bnx2x_ilt *ilt;
3246 int msix_table_size = 0;
3247
3248 /*
3249 * The biggest MSI-X table we might need is the maximum number of fast
3250 * path IGU SBs plus the default SB (for the PF).
3251 */
3252 msix_table_size = bp->igu_sb_cnt + 1;
3253
3254 /* fp array: RSS plus CNIC related L2 queues */
3255 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3256 sizeof(*fp), GFP_KERNEL);
3257 if (!fp)
3258 goto alloc_err;
3259 bp->fp = fp;
3260
3261 /* msix table */
3262 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
3263 if (!tbl)
3264 goto alloc_err;
3265 bp->msix_table = tbl;
3266
3267 /* ilt */
3268 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3269 if (!ilt)
3270 goto alloc_err;
3271 bp->ilt = ilt;
3272
3273 return 0;
3274 alloc_err:
3275 bnx2x_free_mem_bp(bp);
3276 return -ENOMEM;
3277
3278 }
3279
3280 int bnx2x_reload_if_running(struct net_device *dev)
3281 {
3282 struct bnx2x *bp = netdev_priv(dev);
3283
3284 if (unlikely(!netif_running(dev)))
3285 return 0;
3286
3287 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3288 return bnx2x_nic_load(bp, LOAD_NORMAL);
3289 }
3290
3291 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3292 {
3293 u32 sel_phy_idx = 0;
3294 if (bp->link_params.num_phys <= 1)
3295 return INT_PHY;
3296
3297 if (bp->link_vars.link_up) {
3298 sel_phy_idx = EXT_PHY1;
3299 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3300 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3301 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3302 sel_phy_idx = EXT_PHY2;
3303 } else {
3304
3305 switch (bnx2x_phy_selection(&bp->link_params)) {
3306 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3307 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3308 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3309 sel_phy_idx = EXT_PHY1;
3310 break;
3311 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3312 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3313 sel_phy_idx = EXT_PHY2;
3314 break;
3315 }
3316 }
3317
3318 return sel_phy_idx;
3319
3320 }
3321 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3322 {
3323 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3324 /*
3325 * The selected active PHY is always after swapping (in case PHY
3326 * swapping is enabled). So when swapping is enabled, we need to reverse
3327 * the configuration
3328 */
3329
3330 if (bp->link_params.multi_phy_config &
3331 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3332 if (sel_phy_idx == EXT_PHY1)
3333 sel_phy_idx = EXT_PHY2;
3334 else if (sel_phy_idx == EXT_PHY2)
3335 sel_phy_idx = EXT_PHY1;
3336 }
3337 return LINK_CONFIG_IDX(sel_phy_idx);
3338 }
3339
3340 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3341 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3342 {
3343 struct bnx2x *bp = netdev_priv(dev);
3344 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3345
3346 switch (type) {
3347 case NETDEV_FCOE_WWNN:
3348 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3349 cp->fcoe_wwn_node_name_lo);
3350 break;
3351 case NETDEV_FCOE_WWPN:
3352 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3353 cp->fcoe_wwn_port_name_lo);
3354 break;
3355 default:
3356 return -EINVAL;
3357 }
3358
3359 return 0;
3360 }
3361 #endif
3362
3363 /* called with rtnl_lock */
3364 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3365 {
3366 struct bnx2x *bp = netdev_priv(dev);
3367
3368 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3369 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3370 return -EAGAIN;
3371 }
3372
3373 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3374 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3375 return -EINVAL;
3376
3377 /* This does not race with packet allocation
3378 * because the actual alloc size is
3379 * only updated as part of load
3380 */
3381 dev->mtu = new_mtu;
3382
3383 return bnx2x_reload_if_running(dev);
3384 }
3385
3386 u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3387 {
3388 struct bnx2x *bp = netdev_priv(dev);
3389
3390 /* TPA requires Rx CSUM offloading */
3391 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3392 features &= ~NETIF_F_LRO;
3393
3394 return features;
3395 }
3396
3397 int bnx2x_set_features(struct net_device *dev, u32 features)
3398 {
3399 struct bnx2x *bp = netdev_priv(dev);
3400 u32 flags = bp->flags;
3401 bool bnx2x_reload = false;
3402
3403 if (features & NETIF_F_LRO)
3404 flags |= TPA_ENABLE_FLAG;
3405 else
3406 flags &= ~TPA_ENABLE_FLAG;
3407
3408 if (features & NETIF_F_LOOPBACK) {
3409 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3410 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3411 bnx2x_reload = true;
3412 }
3413 } else {
3414 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3415 bp->link_params.loopback_mode = LOOPBACK_NONE;
3416 bnx2x_reload = true;
3417 }
3418 }
3419
3420 if (flags ^ bp->flags) {
3421 bp->flags = flags;
3422 bnx2x_reload = true;
3423 }
3424
3425 if (bnx2x_reload) {
3426 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3427 return bnx2x_reload_if_running(dev);
3428 /* else: bnx2x_nic_load() will be called at end of recovery */
3429 }
3430
3431 return 0;
3432 }
3433
3434 void bnx2x_tx_timeout(struct net_device *dev)
3435 {
3436 struct bnx2x *bp = netdev_priv(dev);
3437
3438 #ifdef BNX2X_STOP_ON_ERROR
3439 if (!bp->panic)
3440 bnx2x_panic();
3441 #endif
3442
3443 smp_mb__before_clear_bit();
3444 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3445 smp_mb__after_clear_bit();
3446
3447 /* This allows the netif to be shutdown gracefully before resetting */
3448 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3449 }
3450
3451 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3452 {
3453 struct net_device *dev = pci_get_drvdata(pdev);
3454 struct bnx2x *bp;
3455
3456 if (!dev) {
3457 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3458 return -ENODEV;
3459 }
3460 bp = netdev_priv(dev);
3461
3462 rtnl_lock();
3463
3464 pci_save_state(pdev);
3465
3466 if (!netif_running(dev)) {
3467 rtnl_unlock();
3468 return 0;
3469 }
3470
3471 netif_device_detach(dev);
3472
3473 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3474
3475 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3476
3477 rtnl_unlock();
3478
3479 return 0;
3480 }
3481
3482 int bnx2x_resume(struct pci_dev *pdev)
3483 {
3484 struct net_device *dev = pci_get_drvdata(pdev);
3485 struct bnx2x *bp;
3486 int rc;
3487
3488 if (!dev) {
3489 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3490 return -ENODEV;
3491 }
3492 bp = netdev_priv(dev);
3493
3494 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3495 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3496 return -EAGAIN;
3497 }
3498
3499 rtnl_lock();
3500
3501 pci_restore_state(pdev);
3502
3503 if (!netif_running(dev)) {
3504 rtnl_unlock();
3505 return 0;
3506 }
3507
3508 bnx2x_set_power_state(bp, PCI_D0);
3509 netif_device_attach(dev);
3510
3511 /* Since the chip was reset, clear the FW sequence number */
3512 bp->fw_seq = 0;
3513 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3514
3515 rtnl_unlock();
3516
3517 return rc;
3518 }
3519
3520
3521 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3522 u32 cid)
3523 {
3524 /* ustorm cxt validation */
3525 cxt->ustorm_ag_context.cdu_usage =
3526 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3527 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3528 /* xcontext validation */
3529 cxt->xstorm_ag_context.cdu_reserved =
3530 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3531 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3532 }
3533
3534 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3535 u8 fw_sb_id, u8 sb_index,
3536 u8 ticks)
3537 {
3538
3539 u32 addr = BAR_CSTRORM_INTMEM +
3540 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3541 REG_WR8(bp, addr, ticks);
3542 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3543 port, fw_sb_id, sb_index, ticks);
3544 }
3545
3546 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3547 u16 fw_sb_id, u8 sb_index,
3548 u8 disable)
3549 {
3550 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3551 u32 addr = BAR_CSTRORM_INTMEM +
3552 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3553 u16 flags = REG_RD16(bp, addr);
3554 /* clear and set */
3555 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3556 flags |= enable_flag;
3557 REG_WR16(bp, addr, flags);
3558 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3559 port, fw_sb_id, sb_index, disable);
3560 }
3561
3562 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3563 u8 sb_index, u8 disable, u16 usec)
3564 {
3565 int port = BP_PORT(bp);
3566 u8 ticks = usec / BNX2X_BTR;
3567
3568 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3569
3570 disable = disable ? 1 : (usec ? 0 : 1);
3571 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3572 }
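
/* Illustrative sketch, not driver code: the coalescing timeout is programmed
 * in ticks of BNX2X_BTR microseconds, and a zero 'usec' value implicitly
 * disables the index even when 'disable' was not set. BNX2X_BTR's real value
 * lives in the driver headers; 4 below is only an example.
 */
#include <stdio.h>

int main(void)
{
	int btr = 4;	/* assumed tick size, in usec */
	int cases[][2] = { { 0, 48 }, { 0, 0 }, { 1, 48 } };	/* {disable, usec} */

	for (int i = 0; i < 3; i++) {
		int disable = cases[i][0], usec = cases[i][1];
		int ticks = usec / btr;

		disable = disable ? 1 : (usec ? 0 : 1);
		printf("usec=%d -> ticks=%d, hc %s\n",
		       usec, ticks, disable ? "disabled" : "enabled");
	}
	return 0;
}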