/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/irq.h>

#include "bnx2x.h"
#include "bnx2x_sriov.h"

/* This is used as a replacement for an MCP if it's not present */
extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int bnx2x_num_queues;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        kfree((void *)x); \
                        x = NULL; \
                } \
        } while (0)

#define BNX2X_PCI_ALLOC(y, size) \
({ \
        void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
        if (x) \
                DP(NETIF_MSG_SP, \
                   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
                   (unsigned long long)(*y), x); \
        x; \
})
#define BNX2X_PCI_FALLOC(y, size) \
({ \
        void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
        if (x) { \
                memset(x, 0xff, size); \
                DP(NETIF_MSG_SP, \
                   "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
                   (unsigned long long)(*y), x); \
        } \
        x; \
})
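/* Illustrative usage sketch (not part of the driver): BNX2X_PCI_ALLOC() is a
 * statement expression that returns the virtual address and writes the DMA
 * address through its first argument, so a typical alloc/free pair (with
 * hypothetical 'ring' and 'mapping' variables) looks like:
 *
 *      ring = BNX2X_PCI_ALLOC(&mapping, size);
 *      if (!ring)
 *              goto alloc_mem_err;
 *      ...
 *      BNX2X_PCI_FREE(ring, mapping, size);
 */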
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 ****************************************************************/

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);

/**
 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
 *
 * @bp:			driver handle
 * @rss_obj:		RSS object to use
 * @config_hash:	re-configure RSS hash keys configuration
 * @enable:		enabled or disabled configuration
 */
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
              bool config_hash, bool enable);
/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp:		driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:		driver handle
 * @fp:		pointer to the fastpath structure
 * @leading:	boolean
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp:		driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:		driver handle
 * @command:	request
 * @param:	request's parameter
 *
 * block until there is a reply
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_force_link_reset - Forces link reset, and put the PHY
 * in reset as well.
 *
 * @bp:		driver handle
 */
void bnx2x_force_link_reset(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	bool
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp:		driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);
/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp:		driver handle
 * @igu_sb_id:	SB id
 * @segment:	SB segment
 * @index:	SB index
 * @op:		SB operation
 * @update:	is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);
/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * bnx2x_setup_cnic_info - provides cnic with updated info
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_info(struct bnx2x *bp);

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);
/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	true, disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are running
 * after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init_cnic - init driver internals for cnic.
 *
 * @bp:		driver handle
 */
void bnx2x_nic_init_cnic(struct bnx2x *bp);

/**
 * bnx2x_pre_irq_nic_init - init driver internals.
 *
 * @bp:		driver handle
 */
void bnx2x_pre_irq_nic_init(struct bnx2x *bp);

/**
 * bnx2x_post_irq_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 */
void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
/**
 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem_cnic - release driver's memory for cnic.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);
/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 * @keep_link:		true iff link should be kept up.
 *
 * - Cleanup MAC configuration.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp:		driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
/**
 * bnx2x_set_rx_mode_inner - set MAC filtering configurations.
 *
 * @bp:		driver handle
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode_inner(struct bnx2x *bp);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp:			driver handle
 * @update_shmem:	whether to update the DCBX section in shmem
 */
void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

int bnx2x_get_vf_config(struct net_device *dev, int vf,
                        struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        u32 i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /* Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW assumes
         * BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(rx_prods)/4; i++)
                REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
int bnx2x_load_cnic(struct bnx2x *bp);
/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_low_latency_recv - LL callback
 *
 * @napi:	napi structure
 */
int bnx2x_low_latency_recv(struct napi_struct *napi);

/**
 * bnx2x_alloc_mem_bp - allocate memories outside main driver structure
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories outside main driver structure
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
#ifdef NETDEV_FCOE_WWNN
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev:	net device
 * @wwn:	output buffer
 * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
                                     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);
/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr);
        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_DEF;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_DEF;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
                                 struct bnx2x_fp_txdata *txdata)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = txdata->tx_bd_prod;
        cons = txdata->tx_bd_cons;

        used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > txdata->tx_ring_size);
        WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(txdata->tx_ring_size) - used;
}
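/* Worked example (illustrative, not from the driver): with a 4096-BD ring,
 * tx_bd_prod = 5 and tx_bd_cons = 65535 (the producer has wrapped the u16
 * counter, the consumer has not yet), SUB_S16(5, 65535) evaluates to 6, so
 * bnx2x_tx_avail() correctly returns 4096 - 6 = 4090.
 */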
static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u8 cos;

        for_each_cos_in_tx_queue(fp, cos)
                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
                        return true;
        return false;
}
#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 cons;
        union eth_rx_cqe *cqe;
        struct eth_fast_path_rx_cqe *cqe_fp;

        cons = RCQ_BD(fp->rx_comp_cons);
        cqe = &fp->rx_comp_ring[cons];
        cqe_fp = &cqe->fast_path_cqe;
        return BNX2X_IS_CQE_COMPLETED(cqe_fp);
}
/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGES, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
        int i;

        for_each_rx_queue_cnic(bp, i) {
                napi_hash_del(&bnx2x_fp(bp, i, napi));
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        }
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
        int i;

        for_each_eth_queue(bp, i) {
                napi_hash_del(&bnx2x_fp(bp, i, napi));
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        }
}

int bnx2x_set_int_mode(struct bnx2x *bp);
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                pci_disable_msix(bp->pdev);
                bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
        } else if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
                        idx--;
                }
        }
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

        /* Clear the two last indices in the page to 1:
         * these are the indices that correspond to the "next" element,
         * hence will never be indicated and should be removed from
         * the calculations.
         */
        bnx2x_clear_sge_mask_next_elems(fp);
}
/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod.
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                       u16 cons, u16 prod)
{
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
}
/************************* Init ******************************************/

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
        return 2 * vn + BP_PORT(bp);
}
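/* E.g. on port 0 this maps VN 0..3 to functions 0, 2, 4 and 6, while on
 * port 1 it maps to functions 1, 3, 5 and 7.
 */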
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
        return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}
/**
 * bnx2x_func_start - init function
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_start_params *start_params =
                &func_params.params.start;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_START;

        /* Function parameters */
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;

        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;

        start_params->gre_tunnel_mode = L2GRE_TUNNEL;
        start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;

        return bnx2x_func_state_change(bp, &func_params);
}
/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
                                         __le16 *fw_lo, u8 *mac)
{
        ((u8 *)fw_hi)[0]  = mac[1];
        ((u8 *)fw_hi)[1]  = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lo)[0]  = mac[5];
        ((u8 *)fw_lo)[1]  = mac[4];
}
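/* E.g. for mac = 00:11:22:33:44:55 the bytes land as fw_hi = {0x11, 0x00},
 * fw_mid = {0x33, 0x22} and fw_lo = {0x55, 0x44}: each 16-bit word carries
 * one big-endian pair of MAC bytes, as the firmware expects.
 */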
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}
/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp)) {
                /* there are special statistics counters for FCoE 136..140 */
                if (IS_FCOE_FP(fp))
                        return bp->cnic_base_cl_id + (bp->pf_num >> 1);
                return fp->cl_id;
        }
        return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}
static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                                               bnx2x_obj_type obj_type)
{
        struct bnx2x *bp = fp->bp;

        /* Configure classification DBs */
        bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
                           fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
                           bnx2x_sp_mapping(bp, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
}
/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp:		driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
        u8 func_num = 0, i;

        /* 57710 has only one function per-port */
        if (CHIP_IS_E1(bp))
                return 1;

        /* Calculate a number of functions enabled on the current
         * PATH/PORT.
         */
        if (CHIP_REV_IS_SLOW(bp)) {
                if (IS_MF(bp))
                        func_num = 4;
                else
                        func_num = 2;
        } else {
                for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                        u32 func_config =
                                MF_CFG_RD(bp,
                                          func_mf_config[BP_PORT(bp) + 2 * i].
                                          config);
                        func_num +=
                                ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
                }
        }

        WARN_ON(!func_num);

        return func_num;
}
static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
        /* RX_MODE controlling object */
        bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

        /* multicast configuration controlling object */
        bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
                             BP_FUNC(bp), BP_FUNC(bp),
                             bnx2x_sp(bp, mcast_rdata),
                             bnx2x_sp_mapping(bp, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
                             BNX2X_OBJ_TYPE_RX);

        /* Setup CAM credit pools */
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));

        bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
                                    bnx2x_get_path_func_num(bp));

        /* RSS configuration object */
        bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
                                  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
                                  bnx2x_sp(bp, rss_rdata),
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);
}
static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
        if (CHIP_IS_E1x(fp->bp))
                return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
        else
                return fp->cl_id;
}
static inline void bnx2x_init_txdata(struct bnx2x *bp,
                                     struct bnx2x_fp_txdata *txdata, u32 cid,
                                     int txq_index, __le16 *tx_cons_sb,
                                     struct bnx2x_fastpath *fp)
{
        txdata->cid = cid;
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;
        txdata->parent_fp = fp;
        txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

        DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
}
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
        return bp->cnic_base_cl_id + cl_idx +
                (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
        return bp->igu_base_sb;
}
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
{
        int cnt = 1000;

        while (bnx2x_has_tx_work_unload(txdata)) {
                if (!cnt) {
                        BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
                                  txdata->txq_index, txdata->tx_pkt_prod,
                                  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
                        return -EBUSY;
#else
                        break;
#endif
                }
                cnt--;
                usleep_range(1000, 2000);
        }

        return 0;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}
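/* Illustrative sketch (not from the driver): write a zeroed 16-byte struct
 * into storm internal memory at 'addr', one 32-bit word per REG_WR():
 *
 *      u32 zeros[4] = {0};
 *      __storm_memset_struct(bp, addr, sizeof(zeros), zeros);
 */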
/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
        int tout = 5000; /* Wait for 5 secs tops */

        while (tout--) {
                smp_mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
                        return true;
                }
                netif_addr_unlock_bh(bp->dev);

                usleep_range(1000, 2000);
        }

        smp_mb();

        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
                          bp->sp_state, mask);
                netif_addr_unlock_bh(bp->dev);
                return false;
        }
        netif_addr_unlock_bh(bp->dev);

        return true;
}
/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp:		driver handle
 * @cxt:	context of the connection on the host memory
 * @cid:	SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                              u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
                                    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
        u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                      FUNC_MF_CFG_MAX_BW_SHIFT;

        if (!max_cfg) {
                DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
                   "Max BW configured to 0 - using 100 instead\n");
                max_cfg = 100;
        }
        return max_cfg;
}
/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
        /* gro frags per page */
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

        /*
         * 1. Number of frags should not grow above MAX_SKB_FRAGS
         * 2. Frag must fit the page
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp:		driver handle
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);

/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp:		driver handle
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
        int func;
        int vn;

        /* Set the attention towards other drivers on the same port */
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                if (vn == BP_VN(bp))
                        continue;

                func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
}
/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp:		driver handle
 * @flags:	flags to update
 * @set:	set or clear
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
        if (SHMEM2_HAS(bp, drv_flags)) {
                u32 drv_flags;

                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
                drv_flags = SHMEM2_RD(bp, drv_flags);

                if (set)
                        SET_FLAGS(drv_flags, flags);
                else
                        RESET_FLAGS(drv_flags, flags);

                SHMEM2_WR(bp, drv_flags, drv_flags);
                DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
        }
}
static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
        if (is_valid_ether_addr(addr) ||
            (is_zero_ether_addr(addr) &&
             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
                return true;

        return false;
}
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);

int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);

void bnx2x_schedule_sp_rtnl(struct bnx2x *, enum sp_rtnl_flag,
                            u32 verbose);

#endif /* BNX2X_CMN_H */