ceph/src/dpdk/drivers/net/qede/base/ecore.h
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#ifndef __ECORE_H
#define __ECORE_H

/* @DPDK */
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#define CONFIG_ECORE_BINARY_FW
#undef CONFIG_ECORE_ZIPPED_FW

#ifdef CONFIG_ECORE_ZIPPED_FW
#include <zlib.h>
#endif

#include "ecore_hsi_common.h"
#include "ecore_hsi_debug_tools.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_proto_if.h"
#include "mcp_public.h"

#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 128 /* @DPDK */
#define VER_SIZE 16
#define ECORE_WFQ_UNIT 100
#include "../qede_logs.h" /* @DPDK */

#define ISCSI_BDQ_ID(_port_id) (_port_id)
#define FCOE_BDQ_ID(_port_id) (_port_id + 2)
/* Constants */
#define ECORE_WID_SIZE (1024)

/* Configurable */
#define ECORE_PF_DEMS_SIZE (4)

/* cau states */
enum ecore_coalescing_mode {
	ECORE_COAL_MODE_DISABLE,
	ECORE_COAL_MODE_ENABLE
};

enum ecore_nvm_cmd {
	ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
	ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
	ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
	ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
	ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
	ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
	ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
	ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
	ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
	ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
	ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
};

#ifndef LINUX_REMOVE
#if !defined(CONFIG_ECORE_L2)
#define CONFIG_ECORE_L2
#define CONFIG_ECORE_SRIOV
#endif
#endif

/* helpers */
#ifndef __EXTRACT__LINUX__
#define MASK_FIELD(_name, _value) \
	((_value) &= (_name##_MASK))

#define FIELD_VALUE(_name, _value) \
	((_value & _name##_MASK) << _name##_SHIFT)

#define SET_FIELD(value, name, flag) \
do { \
	(value) &= ~(name##_MASK << name##_SHIFT); \
	(value) |= (((u64)flag) << (name##_SHIFT)); \
} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)
#endif
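
/* Editor's sketch (usage illustration, not part of the original header):
 * the helpers above expect a field described by a <NAME>_MASK / <NAME>_SHIFT
 * pair. With a hypothetical field
 *
 *	#define FOO_BAR_MASK	0xF
 *	#define FOO_BAR_SHIFT	4
 *
 * typical usage would be:
 *
 *	u32 reg = 0;
 *	SET_FIELD(reg, FOO_BAR, 0x3);		yields reg == 0x30
 *	u32 val = GET_FIELD(reg, FOO_BAR);	yields val == 0x3
 *	u32 enc = FIELD_VALUE(FOO_BAR, 0x3);	yields enc == 0x30
 */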

static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      (cid * ECORE_PF_DEMS_SIZE);

	return db_addr;
}

static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}
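
/* Editor's sketch (not part of the original source): DB_ADDR()/DB_ADDR_VF()
 * produce byte offsets into a function's doorbell BAR. With a hypothetical
 * connection id `cid` and DEMS selector `dems`, a PF doorbell write would
 * look roughly like:
 *
 *	u8 OSAL_IOMEM *db_base = (u8 OSAL_IOMEM *)p_hwfn->doorbells;
 *	u32 db_offset = DB_ADDR(cid, dems);
 *	DIRECT_REG_WR(p_hwfn, db_base + db_offset, prod_val);
 *
 * DIRECT_REG_WR() is the OSAL register-write helper used elsewhere in the
 * qede PMD; its exact signature is assumed here.
 */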

#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
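
/* Editor's note (worked example, not original text): ALIGNED_TYPE_SIZE()
 * rounds a type's size up to the cache-line size recorded in
 * p_dev->cache_shift. For instance, with cache_shift == 6 (64-byte lines),
 * a 100-byte structure is padded to (100 + 63) & ~63 == 128 bytes.
 */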

#ifndef LINUX_REMOVE
#ifndef U64_HI
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#endif

#ifndef U64_LO
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
#endif
#endif

#ifndef __EXTRACT__LINUX__
enum DP_LEVEL {
	ECORE_LEVEL_VERBOSE = 0x0,
	ECORE_LEVEL_INFO = 0x1,
	ECORE_LEVEL_NOTICE = 0x2,
	ECORE_LEVEL_ERR = 0x3,
};

#define ECORE_LOG_LEVEL_SHIFT (30)
#define ECORE_LOG_VERBOSE_MASK (0x3fffffff)
#define ECORE_LOG_INFO_MASK (0x40000000)
#define ECORE_LOG_NOTICE_MASK (0x80000000)

enum DP_MODULE {
#ifndef LINUX_REMOVE
	ECORE_MSG_DRV = 0x0001,
	ECORE_MSG_PROBE = 0x0002,
	ECORE_MSG_LINK = 0x0004,
	ECORE_MSG_TIMER = 0x0008,
	ECORE_MSG_IFDOWN = 0x0010,
	ECORE_MSG_IFUP = 0x0020,
	ECORE_MSG_RX_ERR = 0x0040,
	ECORE_MSG_TX_ERR = 0x0080,
	ECORE_MSG_TX_QUEUED = 0x0100,
	ECORE_MSG_INTR = 0x0200,
	ECORE_MSG_TX_DONE = 0x0400,
	ECORE_MSG_RX_STATUS = 0x0800,
	ECORE_MSG_PKTDATA = 0x1000,
	ECORE_MSG_HW = 0x2000,
	ECORE_MSG_WOL = 0x4000,
#endif
	ECORE_MSG_SPQ = 0x10000,
	ECORE_MSG_STATS = 0x20000,
	ECORE_MSG_DCB = 0x40000,
	ECORE_MSG_IOV = 0x80000,
	ECORE_MSG_SP = 0x100000,
	ECORE_MSG_STORAGE = 0x200000,
	ECORE_MSG_OOO = 0x200000,
	ECORE_MSG_CXT = 0x800000,
	ECORE_MSG_LL2 = 0x1000000,
	ECORE_MSG_ILT = 0x2000000,
	ECORE_MSG_RDMA = 0x4000000,
	ECORE_MSG_DEBUG = 0x8000000,
	/* to be added...up to 0x8000000 */
};
#endif
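
/* Editor's note (a reading of the masks above, not original text): a 32-bit
 * debug-print verbosity word keeps the DP_LEVEL in its top two bits
 * (ECORE_LOG_LEVEL_SHIFT == 30) and the DP_MODULE flags in the lower 30 bits.
 * A hypothetical configuration of a hwfn's debug state:
 *
 *	p_hwfn->dp_level = ECORE_LEVEL_VERBOSE;
 *	p_hwfn->dp_module = ECORE_MSG_IOV | ECORE_MSG_SPQ;
 */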

#define for_each_hwfn(p_dev, i)	for (i = 0; i < p_dev->num_hwfns; i++)

#define D_TRINE(val, cond1, cond2, true1, true2, def) \
	(val == (cond1) ? true1 : \
	 (val == (cond2) ? true2 : def))
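
/* Editor's sketch (not part of the original source): for_each_hwfn() is the
 * usual way to walk all hardware functions of a device, e.g.:
 *
 *	int i;
 *
 *	for_each_hwfn(p_dev, i) {
 *		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 *		...
 *	}
 */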

/* forward */
struct ecore_ptt_pool;
struct ecore_spq;
struct ecore_sb_info;
struct ecore_sb_attn_info;
struct ecore_cxt_mngr;
struct ecore_dma_mem;
struct ecore_sb_sp_info;
struct ecore_ll2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;

struct ecore_rt_data {
	u32 *init_val;
	bool *b_valid;
};

enum ecore_tunn_mode {
	ECORE_MODE_L2GENEVE_TUNN,
	ECORE_MODE_IPGENEVE_TUNN,
	ECORE_MODE_L2GRE_TUNN,
	ECORE_MODE_IPGRE_TUNN,
	ECORE_MODE_VXLAN_TUNN,
};

enum ecore_tunn_clss {
	ECORE_TUNN_CLSS_MAC_VLAN,
	ECORE_TUNN_CLSS_MAC_VNI,
	ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	ECORE_TUNN_CLSS_INNER_MAC_VNI,
	ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
	MAX_ECORE_TUNN_CLSS,
};

struct ecore_tunn_start_params {
	unsigned long tunn_mode;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_vxlan;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
	u8 tunn_clss_l2gre;
	u8 tunn_clss_ipgre;
};

struct ecore_tunn_update_params {
	unsigned long tunn_mode_update_mask;
	unsigned long tunn_mode;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 update_rx_pf_clss;
	u8 update_tx_pf_clss;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_vxlan;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
	u8 tunn_clss_l2gre;
	u8 tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE/iWARP protocol
 */
enum ecore_pci_personality {
	ECORE_PCI_ETH,
	ECORE_PCI_FCOE,
	ECORE_PCI_ISCSI,
	ECORE_PCI_ETH_ROCE,
	ECORE_PCI_IWARP,
	ECORE_PCI_DEFAULT /* default in shmem */
};

/* All VFs are symmetric, all counters are PF + all VFs */
struct ecore_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

#define MAX_PF_PER_PORT 8

/* HW / FW resources, the output of the resource-management scheme for the
 * features supported below; most of this information is received from the MFW.
 */
enum ecore_resources {
	ECORE_SB,
	ECORE_L2_QUEUE,
	ECORE_VPORT,
	ECORE_RSS_ENG,
	ECORE_PQ,
	ECORE_RL,
	ECORE_MAC,
	ECORE_VLAN,
	ECORE_RDMA_CNQ_RAM,
	ECORE_ILT,
	ECORE_LL2_QUEUE,
	ECORE_CMDQS_CQS,
	ECORE_RDMA_STATS_QUEUE,
	ECORE_MAX_RESC, /* must be last */
};

/* Features that require resources; these are the input to the resource
 * management algorithm, whose output is the set of resources above.
 */
enum ecore_feature {
	ECORE_PF_L2_QUE,
	ECORE_PF_TC,
	ECORE_VF,
	ECORE_EXTRA_VF_QUE,
	ECORE_VMQ,
	ECORE_RDMA_CNQ,
	ECORE_ISCSI_CQ,
	ECORE_FCOE_CQ,
	ECORE_MAX_FEATURES,
};

enum ecore_port_mode {
	ECORE_PORT_MODE_DE_2X40G,
	ECORE_PORT_MODE_DE_2X50G,
	ECORE_PORT_MODE_DE_1X100G,
	ECORE_PORT_MODE_DE_4X10G_F,
	ECORE_PORT_MODE_DE_4X10G_E,
	ECORE_PORT_MODE_DE_4X20G,
	ECORE_PORT_MODE_DE_1X40G,
	ECORE_PORT_MODE_DE_2X25G,
	ECORE_PORT_MODE_DE_1X25G,
	ECORE_PORT_MODE_DE_4X25G,
};

enum ecore_dev_cap {
	ECORE_DEV_CAP_ETH,
	ECORE_DEV_CAP_FCOE,
	ECORE_DEV_CAP_ISCSI,
	ECORE_DEV_CAP_ROCE,
	ECORE_DEV_CAP_IWARP
};

#ifndef __EXTRACT__LINUX__
enum ecore_hw_err_type {
	ECORE_HW_ERR_FAN_FAIL,
	ECORE_HW_ERR_MFW_RESP_FAIL,
	ECORE_HW_ERR_HW_ATTN,
	ECORE_HW_ERR_DMAE_FAIL,
	ECORE_HW_ERR_RAMROD_FAIL,
	ECORE_HW_ERR_FW_ASSERT,
};
#endif

struct ecore_hw_info {
	/* PCI personality */
	enum ecore_pci_personality personality;

	/* Resource Allocation scheme results */
	u32 resc_start[ECORE_MAX_RESC];
	u32 resc_num[ECORE_MAX_RESC];
	u32 feat_num[ECORE_MAX_FEATURES];

	#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
	#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
	#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
					 RESC_NUM(_p_hwfn, resc))
	#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])

	/* Number of traffic classes the HW supports */
	u8 num_hw_tc;

	/* Number of TCs which should be active according to DCBx or the
	 * upper-layer driver configuration
	 */
	u8 num_active_tc;

	/* Traffic class used for TCP out-of-order traffic */
	u8 ooo_tc;

	/* The traffic class used by the PF for its offloaded protocol */
	u8 offload_tc;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 ovlan;
	u32 part_num[4];

	unsigned char hw_mac_addr[ETH_ALEN];
	u64 node_wwn; /* For FCoE only */
	u64 port_wwn; /* For FCoE only */

	u16 num_iscsi_conns;
	u16 num_fcoe_conns;

	struct ecore_igu_info *p_igu_info;
	/* Sriov */
	u8 max_chains_per_vf;

	u32 port_mode;
	u32 hw_mode;
	unsigned long device_capabilities;
};
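
/* Editor's sketch (not part of the original source): the RESC_*() and
 * FEAT_NUM() accessors defined inside ecore_hw_info above are how callers
 * read the resource-allocation results, e.g.:
 *
 *	u32 num_l2_queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
 *	u32 first_l2_queue = RESC_START(p_hwfn, ECORE_L2_QUEUE);
 *	u32 num_vf_feats = FEAT_NUM(p_hwfn, ECORE_VF);
 *
 * Each value is this PF's slice of the per-engine totals reported by the MFW.
 */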

struct ecore_hw_cid_data {
	u32 cid;
	bool b_cid_allocated;
	u8 vfid; /* 1-based; 0 signals this is for a PF */

	/* Additional identifiers */
	u16 opaque_fid;
	u8 vport_id;
};

/* Maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE 0x2000

struct ecore_dmae_info {
	/* Mutex for synchronizing access to functions */
	osal_mutex_t mutex;

	u8 channel;

	dma_addr_t completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32 *p_completion_word;

	dma_addr_t intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32 *p_intermediate_buffer;

	dma_addr_t dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};

struct ecore_wfq_data {
	u32 default_min_speed; /* When the WFQ feature is not configured */
	u32 min_speed; /* When the feature is configured for at least 1 vport */
	bool configured;
};

struct ecore_qm_info {
	struct init_qm_pq_params *qm_pq_params;
	struct init_qm_vport_params *qm_vport_params;
	struct init_qm_port_params *qm_port_params;
	u16 start_pq;
	u8 start_vport;
	u8 pure_lb_pq;
	u8 offload_pq;
	u8 pure_ack_pq;
	u8 ooo_pq;
	u8 vf_queues_offset;
	u16 num_pqs;
	u16 num_vf_pqs;
	u8 num_vports;
	u8 max_phys_tcs_per_port;
	bool pf_rl_en;
	bool pf_wfq_en;
	bool vport_rl_en;
	bool vport_wfq_en;
	u8 pf_wfq;
	u32 pf_rl;
	struct ecore_wfq_data *wfq_data;
	u8 num_pf_rls;
};

struct storm_stats {
	u32 address;
	u32 len;
};

struct ecore_fw_data {
#ifdef CONFIG_ECORE_BINARY_FW
	struct fw_ver_info *fw_ver_info;
#endif
	const u8 *modes_tree_buf;
	union init_op *init_ops;
	const u32 *arr_data;
	u32 init_ops_size;
};

struct ecore_hwfn {
	struct ecore_dev *p_dev;
	u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
	u8 rel_pf_id; /* Relative to engine */
	u8 abs_pf_id;
	#define ECORE_PATH_ID(_p_hwfn) \
		(ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
	u8 port_id;
	bool b_active;

	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];
	void *dp_ctx;

	bool first_on_engine;
	bool hw_init_done;

	u8 num_funcs_on_engine;
	u8 enabled_func_idx;

	/* BAR access */
	void OSAL_IOMEM *regview;
	void OSAL_IOMEM *doorbells;
	u64 db_phys_addr;
	unsigned long db_size;

	/* PTT pool */
	struct ecore_ptt_pool *p_ptt_pool;

	/* HW info */
	struct ecore_hw_info hw_info;

	/* rt_array (for init-tool) */
	struct ecore_rt_data rt_data;

	/* SPQ */
	struct ecore_spq *p_spq;

	/* EQ */
	struct ecore_eq *p_eq;

	/* Consolidation queue */
	struct ecore_consq *p_consq;

	/* Slow-Path definitions */
	osal_dpc_t sp_dpc;
	bool b_sp_dpc_enabled;

	struct ecore_ptt *p_main_ptt;
	struct ecore_ptt *p_dpc_ptt;

	struct ecore_sb_sp_info *p_sp_sb;
	struct ecore_sb_attn_info *p_sb_attn;

	/* Protocol related */
	bool using_ll2;
	struct ecore_ll2_info *p_ll2_info;
	struct ecore_ooo_info *p_ooo_info;
	struct ecore_iscsi_info *p_iscsi_info;
	struct ecore_fcoe_info *p_fcoe_info;
	struct ecore_rdma_info *p_rdma_info;
	struct ecore_pf_params pf_params;

	bool b_rdma_enabled_in_prs;
	u32 rdma_prs_search_reg;

	/* Array of sb_info of all status blocks */
	struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
	u16 num_sbs;

	struct ecore_cxt_mngr *p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not */
	bool b_int_enabled;
	bool b_int_requested;

	/* True if the driver requests the link */
	bool b_drv_link_init;

	struct ecore_vf_iov *vf_iov_info;
	struct ecore_pf_iov *pf_iov_info;
	struct ecore_mcp_info *mcp_info;
	struct ecore_dcbx_info *p_dcbx_info;

	struct ecore_hw_cid_data *p_tx_cids;
	struct ecore_hw_cid_data *p_rx_cids;

	struct ecore_dmae_info dmae_info;

	/* QM init */
	struct ecore_qm_info qm_info;

#ifdef CONFIG_ECORE_ZIPPED_FW
	/* Buffer for unzipping firmware data */
	void *unzip_buf;
#endif

	struct dbg_tools_data dbg_info;

	struct z_stream_s *stream;

	/* PWM region specific data */
	u32 dpi_size;
	u32 dpi_count;
	u32 dpi_start_offset; /* used to calculate the doorbell address */

	/* If one of the following is set then EDPM shouldn't be used */
	u8 dcbx_no_edpm;
	u8 db_bar_no_edpm;
};

#ifndef __EXTRACT__LINUX__
enum ecore_mf_mode {
	ECORE_MF_DEFAULT,
	ECORE_MF_OVLAN,
	ECORE_MF_NPAR,
};
#endif

/* @DPDK */
struct ecore_dbg_feature {
	u8 *dump_buf;
	u32 buf_size;
	u32 dumped_dwords;
};

enum qed_dbg_features {
	DBG_FEATURE_BUS,
	DBG_FEATURE_GRC,
	DBG_FEATURE_IDLE_CHK,
	DBG_FEATURE_MCP_TRACE,
	DBG_FEATURE_REG_FIFO,
	DBG_FEATURE_PROTECTION_OVERRIDE,
	DBG_FEATURE_NUM
};

struct ecore_dev {
	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];
	void *dp_ctx;

	u8 type;
#define ECORE_DEV_TYPE_BB (0 << 0)
#define ECORE_DEV_TYPE_AH (1 << 0)
/* Translate type/revision combo into the proper conditions */
#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev))
#ifndef ASIC_ONLY
#define ECORE_IS_BB_B0(dev) ((ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) || \
			     (CHIP_REV_IS_TEDIBEAR(dev)))
#else
#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#endif
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)

	u16 vendor_id;
	u16 device_id;

	u16 chip_num;
	#define CHIP_NUM_MASK 0xffff
	#define CHIP_NUM_SHIFT 16

	u16 chip_rev;
	#define CHIP_REV_MASK 0xf
	#define CHIP_REV_SHIFT 12
#ifndef ASIC_ONLY
	#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
	#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
	#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
	#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
					  CHIP_REV_IS_EMUL_B0(_p_dev))
	#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
	#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
	#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
					  CHIP_REV_IS_FPGA_B0(_p_dev))
	#define CHIP_REV_IS_SLOW(_p_dev) \
		(CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
	#define CHIP_REV_IS_A0(_p_dev) \
		(CHIP_REV_IS_EMUL_A0(_p_dev) || \
		 CHIP_REV_IS_FPGA_A0(_p_dev) || \
		 !(_p_dev)->chip_rev)
	#define CHIP_REV_IS_B0(_p_dev) \
		(CHIP_REV_IS_EMUL_B0(_p_dev) || \
		 CHIP_REV_IS_FPGA_B0(_p_dev) || \
		 (_p_dev)->chip_rev == 1)
	#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
#else
	#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
	#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
#endif

	u16 chip_metal;
	#define CHIP_METAL_MASK 0xff
	#define CHIP_METAL_SHIFT 4

	u16 chip_bond_id;
	#define CHIP_BOND_ID_MASK 0xf
	#define CHIP_BOND_ID_SHIFT 0

	u8 num_engines;
	u8 num_ports_in_engines;
	u8 num_funcs_in_port;

	u8 path_id;
	enum ecore_mf_mode mf_mode;
	#define IS_MF_DEFAULT(_p_hwfn) \
		(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
	#define IS_MF_SI(_p_hwfn) \
		(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
	#define IS_MF_SD(_p_hwfn) \
		(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)

	int pcie_width;
	int pcie_speed;
	u8 ver_str[NAME_SIZE]; /* @DPDK */
	/* Add MF related configuration */
	u8 mcp_rev;
	u8 boot_mode;

	u8 wol;

	u32 int_mode;
	enum ecore_coalescing_mode int_coalescing_mode;
	u16 rx_coalesce_usecs;
	u16 tx_coalesce_usecs;

	/* Start BAR offset of first hwfn */
	void OSAL_IOMEM *regview;
	void OSAL_IOMEM *doorbells;
	u64 db_phys_addr;
	unsigned long db_size;

	/* PCI */
	u8 cache_shift;

	/* Init */
	const struct iro *iro_arr;
	#define IRO (p_hwfn->p_dev->iro_arr)

	/* HW functions */
	u8 num_hwfns;
	struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];

	/* SRIOV */
	struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
	bool b_hw_channel;

	unsigned long tunn_mode;

	bool b_is_vf;

	u32 drv_type;

	u32 rdma_max_sge;
	u32 rdma_max_inline;
	u32 rdma_max_srq_sge;

	struct ecore_eth_stats *reset_stats;
	struct ecore_fw_data *fw_data;

	u32 mcp_nvm_resp;

	/* Recovery */
	bool recov_in_prog;

	/* Indicates whether attentions should be prevented from being
	 * reasserted
	 */
	bool attn_clr_en;

	/* Indicates whether the MFW is allowed to collect a crash dump */
	bool mdump_en;

	/* Indicates if the reg_fifo is checked after any register access */
	bool chk_reg_fifo;

#ifndef ASIC_ONLY
	bool b_is_emul_full;
#endif

#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
	void *firmware;
	u64 fw_len;
#endif

	/* @DPDK */
	struct ecore_dbg_feature dbg_features[DBG_FEATURE_NUM];
	u8 engine_for_debug;
};

#define NUM_OF_VFS(dev)		(ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
					: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev)	(ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
					: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev)	(ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
					: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev)		(ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
					: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev)	(ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
					: MAX_NUM_PFS_K2)
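
/* Editor's sketch (not part of the original source): the NUM_OF_*() helpers
 * above select the BB or AH/K2 limit based on the device type, e.g.:
 *
 *	u32 max_vfs = NUM_OF_VFS(p_dev);
 *	u32 max_sbs = NUM_OF_SBS(p_dev);
 *
 *	if (ECORE_IS_BB(p_dev)) {
 *		... BB-specific sizing ...
 *	} else {
 *		... AH/K2-specific sizing ...
 *	}
 */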

#ifndef REAL_ASIC_ONLY
#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
	(ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
	(ECORE_PATH_ID(p_hwfn) == 1) && \
	((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
	 (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
	 (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
#endif

/**
 * @brief ecore_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return OSAL_INLINE u8
 */
static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
					       u32 concrete_fid)
{
	u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
	u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
	u8 sw_fid;

	if (vf_valid)
		sw_fid = vfid + MAX_NUM_PFS;
	else
		sw_fid = pfid;

	return sw_fid;
}
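
/* Editor's note (worked example, not original text): ecore_concrete_to_sw_fid()
 * folds PFs and VFs into a single software id space by offsetting VF ids by
 * MAX_NUM_PFS. For example, assuming MAX_NUM_PFS == 16, PF 2 maps to sw_fid 2,
 * while VF 3 (with vf_valid set) maps to sw_fid 19.
 */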

#define PURE_LB_TC 8
#define OOO_LB_TC 9

int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
					   u32 min_pf_rate);

int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);

#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])

#endif /* __ECORE_H */