/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_hsi.h"

extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.7.1.20"

#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
#define VER_SIZE 16

#define QED_WFQ_UNIT 100

/* cau states */
enum qed_coalescing_mode {
        QED_COAL_MODE_DISABLE,
        QED_COAL_MODE_ENABLE
};

struct qed_eth_cb_ops;
struct qed_dev_info;

/* helpers */
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
        u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
                      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

        return db_addr;
}
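
/* Usage sketch (illustrative, not a definition from this file): the helper
 * only computes the legacy doorbell offset for a connection, so callers are
 * expected to add it to the doorbell BAR mapping before ringing, e.g.:
 *
 *      void __iomem *db = (u8 __iomem *)p_hwfn->doorbells +
 *                         qed_db_addr(cid, dems);
 *
 * where 'cid' is the connection id and 'dems' the doorbell element size.
 */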

#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)                                  \
        ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
         ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
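
/* Worked example (illustrative): with cdev->cache_shift == 6, i.e. 64-byte
 * cache lines, ALIGNED_TYPE_SIZE() pads a 100-byte type up to the next
 * multiple of the line size: (100 + 63) & ~63 == 128.
 */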

#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)
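
/* Typical iteration pattern over the hw-functions of a device (a minimal
 * sketch; 'i' is a plain loop index supplied by the caller):
 *
 *      int i;
 *
 *      for_each_hwfn(cdev, i) {
 *              struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 *              ...
 *      }
 */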

#define D_TRINE(val, cond1, cond2, true1, true2, def) \
        (val == (cond1) ? true1 :                     \
         (val == (cond2) ? true2 : def))

/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_mcp_info;

struct qed_rt_data {
        u32 *init_val;
        bool *b_valid;
};

enum qed_tunn_mode {
        QED_MODE_L2GENEVE_TUNN,
        QED_MODE_IPGENEVE_TUNN,
        QED_MODE_L2GRE_TUNN,
        QED_MODE_IPGRE_TUNN,
        QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
        QED_TUNN_CLSS_MAC_VLAN,
        QED_TUNN_CLSS_MAC_VNI,
        QED_TUNN_CLSS_INNER_MAC_VLAN,
        QED_TUNN_CLSS_INNER_MAC_VNI,
        MAX_QED_TUNN_CLSS,
};

struct qed_tunn_start_params {
        unsigned long tunn_mode;
        u16 vxlan_udp_port;
        u16 geneve_udp_port;
        u8 update_vxlan_udp_port;
        u8 update_geneve_udp_port;
        u8 tunn_clss_vxlan;
        u8 tunn_clss_l2geneve;
        u8 tunn_clss_ipgeneve;
        u8 tunn_clss_l2gre;
        u8 tunn_clss_ipgre;
};

struct qed_tunn_update_params {
        unsigned long tunn_mode_update_mask;
        unsigned long tunn_mode;
        u16 vxlan_udp_port;
        u16 geneve_udp_port;
        u8 update_rx_pf_clss;
        u8 update_tx_pf_clss;
        u8 update_vxlan_udp_port;
        u8 update_geneve_udp_port;
        u8 tunn_clss_vxlan;
        u8 tunn_clss_l2geneve;
        u8 tunn_clss_ipgeneve;
        u8 tunn_clss_l2gre;
        u8 tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous with the protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE protocol
 */
enum qed_pci_personality {
        QED_PCI_ETH,
        QED_PCI_ISCSI,
        QED_PCI_ETH_ROCE,
        QED_PCI_DEFAULT /* default in shmem */
};

/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
        u32 cids;
        u32 vf_cids;
        u32 tids;
};

enum QED_RESOURCES {
        QED_SB,
        QED_L2_QUEUE,
        QED_VPORT,
        QED_RSS_ENG,
        QED_PQ,
        QED_RL,
        QED_MAC,
        QED_VLAN,
        QED_ILT,
        QED_MAX_RESC,
};

enum QED_FEATURE {
        QED_PF_L2_QUE,
        QED_VF,
        QED_MAX_FEATURES,
};

enum QED_PORT_MODE {
        QED_PORT_MODE_DE_2X40G,
        QED_PORT_MODE_DE_2X50G,
        QED_PORT_MODE_DE_1X100G,
        QED_PORT_MODE_DE_4X10G_F,
        QED_PORT_MODE_DE_4X10G_E,
        QED_PORT_MODE_DE_4X20G,
        QED_PORT_MODE_DE_1X40G,
        QED_PORT_MODE_DE_2X25G,
        QED_PORT_MODE_DE_1X25G
};

enum qed_dev_cap {
        QED_DEV_CAP_ETH,
        QED_DEV_CAP_ISCSI,
        QED_DEV_CAP_ROCE,
};

struct qed_hw_info {
        /* PCI personality */
        enum qed_pci_personality personality;

        /* Resource Allocation scheme results */
        u32 resc_start[QED_MAX_RESC];
        u32 resc_num[QED_MAX_RESC];
        u32 feat_num[QED_MAX_FEATURES];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
                                 RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])

        u8 num_tc;
        u8 offload_tc;
        u8 non_offload_tc;

        u32 concrete_fid;
        u16 opaque_fid;
        u16 ovlan;
        u32 part_num[4];

        unsigned char hw_mac_addr[ETH_ALEN];

        struct qed_igu_info *p_igu_info;

        u32 port_mode;
        u32 hw_mode;
        unsigned long device_capabilities;
};

struct qed_hw_cid_data {
        u32 cid;
        bool b_cid_allocated;

        /* Additional identifiers */
        u16 opaque_fid;
        u8 vport_id;
};

/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE 0x2000

struct qed_dmae_info {
        /* Mutex for synchronizing access to functions */
        struct mutex mutex;

        u8 channel;

        dma_addr_t completion_word_phys_addr;

        /* The memory location where the DMAE writes the completion
         * value when an operation is finished on this context.
         */
        u32 *p_completion_word;

        dma_addr_t intermediate_buffer_phys_addr;

        /* An intermediate buffer for DMAE operations that use virtual
         * addresses - data is DMA'd to/from this buffer and then
         * memcpy'd to/from the virtual address
         */
        u32 *p_intermediate_buffer;

        dma_addr_t dmae_cmd_phys_addr;
        struct dmae_cmd *p_dmae_cmd;
};

struct qed_wfq_data {
        /* when feature is configured for at least 1 vport */
        u32 min_speed;
        bool configured;
};

struct qed_qm_info {
        struct init_qm_pq_params *qm_pq_params;
        struct init_qm_vport_params *qm_vport_params;
        struct init_qm_port_params *qm_port_params;
        u16 start_pq;
        u8 start_vport;
        u8 pure_lb_pq;
        u8 offload_pq;
        u8 pure_ack_pq;
        u8 ooo_pq;
        u8 vf_queues_offset;
        u16 num_pqs;
        u16 num_vf_pqs;
        u8 num_vports;
        u8 max_phys_tcs_per_port;
        bool pf_rl_en;
        bool pf_wfq_en;
        bool vport_rl_en;
        bool vport_wfq_en;
        u8 pf_wfq;
        u32 pf_rl;
        struct qed_wfq_data *wfq_data;
        u8 num_pf_rls;
};

struct storm_stats {
        u32 address;
        u32 len;
};

struct qed_storm_stats {
        struct storm_stats mstats;
        struct storm_stats pstats;
        struct storm_stats tstats;
        struct storm_stats ustats;
};

struct qed_fw_data {
        struct fw_ver_info *fw_ver_info;
        const u8 *modes_tree_buf;
        union init_op *init_ops;
        const u32 *arr_data;
        u32 init_ops_size;
};

struct qed_simd_fp_handler {
        void *token;
        void (*func)(void *);
};

struct qed_hwfn {
        struct qed_dev *cdev;
        u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
        u8 rel_pf_id; /* Relative to engine */
        u8 abs_pf_id;
#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
        u8 port_id;
        bool b_active;

        u32 dp_module;
        u8 dp_level;
        char name[NAME_SIZE];

        bool first_on_engine;
        bool hw_init_done;

        u8 num_funcs_on_engine;
        u8 enabled_func_idx;

        /* BAR access */
        void __iomem *regview;
        void __iomem *doorbells;
        u64 db_phys_addr;
        unsigned long db_size;

        /* PTT pool */
        struct qed_ptt_pool *p_ptt_pool;

        /* HW info */
        struct qed_hw_info hw_info;

        /* rt_array (for init-tool) */
        struct qed_rt_data rt_data;

        /* SPQ */
        struct qed_spq *p_spq;

        /* EQ */
        struct qed_eq *p_eq;

        /* Consolidate Q */
        struct qed_consq *p_consq;

        /* Slow-Path definitions */
        struct tasklet_struct *sp_dpc;
        bool b_sp_dpc_enabled;

        struct qed_ptt *p_main_ptt;
        struct qed_ptt *p_dpc_ptt;

        struct qed_sb_sp_info *p_sp_sb;
        struct qed_sb_attn_info *p_sb_attn;

        /* Protocol related */
        struct qed_pf_params pf_params;

        bool b_rdma_enabled_in_prs;
        u32 rdma_prs_search_reg;

        /* Array of sb_info of all status blocks */
        struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
        u16 num_sbs;

        struct qed_cxt_mngr *p_cxt_mngr;

        /* Flag indicating whether interrupts are enabled or not */
        bool b_int_enabled;
        bool b_int_requested;

        /* True if the driver has requested the link */
        bool b_drv_link_init;

        struct qed_vf_iov *vf_iov_info;
        struct qed_pf_iov *pf_iov_info;
        struct qed_mcp_info *mcp_info;

        struct qed_dcbx_info *p_dcbx_info;

        struct qed_hw_cid_data *p_tx_cids;
        struct qed_hw_cid_data *p_rx_cids;

        struct qed_dmae_info dmae_info;

        /* QM init */
        struct qed_qm_info qm_info;
        struct qed_storm_stats storm_stats;

        /* Buffer for unzipping firmware data */
        void *unzip_buf;

        struct qed_simd_fp_handler simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
        struct workqueue_struct *iov_wq;
        struct delayed_work iov_task;
        unsigned long iov_task_flags;
#endif

        struct z_stream_s *stream;
};

struct pci_params {
        int pm_cap;

        unsigned long mem_start;
        unsigned long mem_end;
        unsigned int irq;
        u8 pf_num;
};

struct qed_int_param {
        u32 int_mode;
        u8 num_vectors;
        u8 min_msix_cnt; /* for minimal functionality */
};

struct qed_int_params {
        struct qed_int_param in;
        struct qed_int_param out;
        struct msix_entry *msix_table;
        bool fp_initialized;
        u8 fp_msix_base;
        u8 fp_msix_cnt;
};

struct qed_dev {
        u32 dp_module;
        u8 dp_level;
        char name[NAME_SIZE];

        u8 type;
#define QED_DEV_TYPE_BB (0 << 0)
#define QED_DEV_TYPE_AH BIT(0)
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
                           CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
                           CHIP_REV_IS_B0(dev))

#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
                           QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)

        u16 vendor_id;
        u16 device_id;

        u16 chip_num;
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16

        u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)

        u16 chip_metal;
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4

        u16 chip_bond_id;
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0

        u8 num_engines;
        u8 num_ports_in_engines;
        u8 num_funcs_in_port;

        u8 path_id;
        enum qed_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

        int pcie_width;
        int pcie_speed;
        u8 ver_str[VER_SIZE];

        /* Add MF related configuration */
        u8 mcp_rev;
        u8 boot_mode;

        u8 wol;

        u32 int_mode;
        enum qed_coalescing_mode int_coalescing_mode;
        u16 rx_coalesce_usecs;
        u16 tx_coalesce_usecs;

        /* Start BAR offset of first hwfn */
        void __iomem *regview;
        void __iomem *doorbells;
        u64 db_phys_addr;
        unsigned long db_size;

        /* PCI */
        u8 cache_shift;

        /* Init */
        const struct iro *iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)

        /* HW functions */
        u8 num_hwfns;
        struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];

        /* SRIOV */
        struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)

        unsigned long tunn_mode;

        bool b_is_vf;
        u32 drv_type;

        struct qed_eth_stats *reset_stats;
        struct qed_fw_data *fw_data;

        u32 mcp_nvm_resp;

        /* Linux specific here */
        struct qede_dev *edev;
        struct pci_dev *pdev;
        int msg_enable;

        struct pci_params pci_params;

        struct qed_int_params int_params;

        u8 protocol;
#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)

        /* Callbacks to protocol driver */
        union {
                struct qed_common_cb_ops *common;
                struct qed_eth_cb_ops *eth;
        } protocol_ops;
        void *ops_cookie;

        const struct firmware *firmware;
};

#define NUM_OF_VFS(dev)         MAX_NUM_VFS_BB
#define NUM_OF_L2_QUEUES(dev)   MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB

/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return inline u8
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
                                        u32 concrete_fid)
{
        u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
        u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
        u8 vf_valid = GET_FIELD(concrete_fid,
                                PXP_CONCRETE_FID_VFVALID);
        u8 sw_fid;

        if (vf_valid)
                sw_fid = vfid + MAX_NUM_PFS;
        else
                sw_fid = pfid;

        return sw_fid;
}
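
/* Example mapping (follows directly from the code above): a PF with pfid 2
 * and VFVALID clear maps to sw_fid 2, while a VF with vfid 5 maps to
 * MAX_NUM_PFS + 5, so PF and VF software ids never collide.
 */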

#define PURE_LB_TC 8
#define OOO_LB_TC 9

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])

/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

#define REG_ADDR(cdev, offset)          (void __iomem *)((u8 __iomem *)\
                                                (cdev->regview) + \
                                                (offset))

#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val)                     \
        writel((u32)val, (void __iomem *)((u8 __iomem *)\
                                          (cdev->doorbells) + (db_addr)))
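
/* Illustrative use of the doorbell helpers (a sketch, not a prototype from
 * this file): qed_db_addr() can be combined with DOORBELL() to ring a
 * connection's legacy doorbell, e.g.:
 *
 *      DOORBELL(cdev, qed_db_addr(cid, dems), raw_db_value);
 *
 * where 'raw_db_value' is the 32-bit doorbell data prepared by the caller.
 */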

/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
                   u32 input_len, u8 *input_buf,
                   u32 max_size, u8 *unzip_buf);

int qed_slowpath_irq_req(struct qed_hwfn *hwfn);

#endif /* _QED_H */