/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_IF_H
#define _QED_IF_H

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>

enum dcbx_protocol_type {
	DCBX_PROTOCOL_ISCSI,
	DCBX_PROTOCOL_FCOE,
	DCBX_PROTOCOL_ROCE,
	DCBX_PROTOCOL_ROCE_V2,
	DCBX_PROTOCOL_ETH,
	DCBX_MAX_PROTOCOL_TYPE
};

#define QED_ROCE_PROTOCOL_INDEX	(3)

#define QED_LLDP_CHASSIS_ID_STAT_LEN	4
#define QED_LLDP_PORT_ID_STAT_LEN	4
#define QED_DCBX_MAX_APP_PROTOCOL	32
#define QED_MAX_PFC_PRIORITIES		8
#define QED_DCBX_DSCP_SIZE		64

struct qed_dcbx_lldp_remote {
	u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
	bool enable_rx;
	bool enable_tx;
	u32 tx_interval;
	u32 max_credit;
};

struct qed_dcbx_lldp_local {
	u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
};

struct qed_dcbx_app_prio {
	u8 roce;
	u8 roce_v2;
	u8 fcoe;
	u8 iscsi;
	u8 eth;
};

struct qed_dbcx_pfc_params {
	bool willing;
	bool enabled;
	u8 prio[QED_MAX_PFC_PRIORITIES];
	u8 max_tc;
};

enum qed_dcbx_sf_ieee_type {
	QED_DCBX_SF_IEEE_ETHTYPE,
	QED_DCBX_SF_IEEE_TCP_PORT,
	QED_DCBX_SF_IEEE_UDP_PORT,
	QED_DCBX_SF_IEEE_TCP_UDP_PORT
};

struct qed_app_entry {
	bool ethtype;
	enum qed_dcbx_sf_ieee_type sf_ieee;
	bool enabled;
	u8 prio;
	u16 proto_id;
	enum dcbx_protocol_type proto_type;
};

struct qed_dcbx_params {
	struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
	u16 num_app_entries;
	bool app_willing;
	bool app_valid;
	bool app_error;
	bool ets_willing;
	bool ets_enabled;
	bool ets_cbs;
	bool valid;
	u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
	struct qed_dbcx_pfc_params pfc;
	u8 max_ets_tc;
};

struct qed_dcbx_admin_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_remote_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_operational_params {
	struct qed_dcbx_app_prio app_prio;
	struct qed_dcbx_params params;
	bool valid;
	bool enabled;
	bool ieee;
	bool cee;
	u32 err;
};

struct qed_dcbx_get {
	struct qed_dcbx_operational_params operational;
	struct qed_dcbx_lldp_remote lldp_remote;
	struct qed_dcbx_lldp_local lldp_local;
	struct qed_dcbx_remote_params remote;
	struct qed_dcbx_admin_params local;
};

enum qed_led_mode {
	QED_LED_MODE_OFF,
	QED_LED_MODE_ON,
	QED_LED_MODE_RESTORE
};

#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define QED_COALESCE_MAX	0xFF
#define QED_DEFAULT_RX_USECS	12

/* forward */
struct qed_dev;

struct qed_eth_pf_params {
	/* The following parameters are used during HW-init
	 * and need to be passed as arguments to the update_pf_params
	 * routine invoked before slowpath start.
	 */
	u16 num_cons;
};

struct qed_fcoe_pf_params {
	/* The following parameters are used during protocol-init */
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[2];

	/* The following parameters are used during HW-init
	 * and need to be passed as arguments to the update_pf_params
	 * routine invoked before slowpath start.
	 */
	u16 num_cons;
	u16 num_tasks;

	/* The following parameters are used during protocol-init */
	u16 sq_num_pbl_pages;

	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u16 rq_buffer_log_size;
	u16 mtu;
	u16 dummy_icid;
	u16 bdq_xoff_threshold[2];
	u16 bdq_xon_threshold[2];
	u16 rq_buffer_size;
	u8 num_cqs;		/* num of global CQs */
	u8 log_page_size;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 is_target;
	u8 bdq_pbl_num_entries[2];
};

/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[2];
	u32 max_cwnd;
	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u32 two_msl_timer;
	u16 dup_ack_threshold;
	u16 tx_sws_timer;
	u16 min_rto;
	u16 min_rto_rt;
	u16 max_rto;

	/* The following parameters are used during HW-init
	 * and need to be passed as arguments to the update_pf_params
	 * routine invoked before slowpath start.
	 */
	u16 num_cons;
	u16 num_tasks;

	/* The following parameters are used during protocol-init */
	u16 half_way_close_timeout;
	u16 bdq_xoff_threshold[2];
	u16 bdq_xon_threshold[2];
	u16 cmdq_xoff_threshold;
	u16 cmdq_xon_threshold;
	u16 rq_buffer_size;

	u8 num_sq_pages_in_ring;
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 num_queues;
	u8 log_page_size;
	u8 rqe_log_size;
	u8 max_fin_rt;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;
	u8 ooo_enable;

	u8 is_target;
	u8 bdq_pbl_num_entries[2];
};

struct qed_rdma_pf_params {
	/* Supplied to QED during resource allocation (may affect the ILT and
	 * the doorbell BAR).
	 */
	u32 min_dpis;		/* number of requested DPIs */
	u32 num_qps;		/* number of requested Queue Pairs */
	u32 num_srqs;		/* number of requested SRQs */
	u8 roce_edpm_mode;	/* see QED_ROCE_EDPM_MODE_ENABLE */
	u8 gl_pi;		/* protocol index */

	/* Will allocate rate limiters to be used with QPs */
	u8 enable_dcqcn;
};

struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
	struct qed_fcoe_pf_params fcoe_pf_params;
	struct qed_iscsi_pf_params iscsi_pf_params;
	struct qed_rdma_pf_params rdma_pf_params;
};

enum qed_int_mode {
	QED_INT_MODE_INTA,
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,
};

struct qed_sb_info {
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	u32 sb_ack;		/* Last given ack */
	u16 igu_sb_id;
	void __iomem *igu_addr;
	u8 flags;
#define QED_SB_INFO_INIT	0x1
#define QED_SB_INFO_SETUP	0x2

	struct qed_dev *cdev;
};

enum qed_dev_type {
	QED_DEV_TYPE_BB,
	QED_DEV_TYPE_AH,
};

struct qed_dev_info {
	unsigned long pci_mem_start;
	unsigned long pci_mem_end;
	unsigned int pci_irq;
	u8 num_hwfns;

	u8 hw_mac[ETH_ALEN];
	bool is_mf_default;

	/* FW version */
	u16 fw_major;
	u16 fw_minor;
	u16 fw_rev;
	u16 fw_eng;

	/* MFW version */
	u32 mfw_rev;

	u32 flash_size;
	u8 mf_mode;
	bool tx_switching;
	bool rdma_supported;
	u16 mtu;

	bool wol_support;

	enum qed_dev_type dev_type;
};

enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
	QED_SB_TYPE_CNQ,
	QED_SB_TYPE_STORAGE,
};

enum qed_protocol {
	QED_PROTOCOL_ETH,
	QED_PROTOCOL_ISCSI,
	QED_PROTOCOL_FCOE,
};

enum qed_link_mode_bits {
	QED_LM_FIBRE_BIT = BIT(0),
	QED_LM_Autoneg_BIT = BIT(1),
	QED_LM_Asym_Pause_BIT = BIT(2),
	QED_LM_Pause_BIT = BIT(3),
	QED_LM_1000baseT_Half_BIT = BIT(4),
	QED_LM_1000baseT_Full_BIT = BIT(5),
	QED_LM_10000baseKR_Full_BIT = BIT(6),
	QED_LM_25000baseKR_Full_BIT = BIT(7),
	QED_LM_40000baseLR4_Full_BIT = BIT(8),
	QED_LM_50000baseKR2_Full_BIT = BIT(9),
	QED_LM_100000baseKR4_Full_BIT = BIT(10),
	QED_LM_COUNT = 11
};

struct qed_link_params {
	bool link_up;

#define QED_LINK_OVERRIDE_SPEED_AUTONEG		BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS	BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED	BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG		BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE		BIT(4)
	u32 override_flags;
	bool autoneg;
	u32 adv_speeds;
	u32 forced_speed;
#define QED_LINK_PAUSE_AUTONEG_ENABLE		BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE		BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE		BIT(2)
	u32 pause_config;
#define QED_LINK_LOOPBACK_NONE			BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY		BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY		BIT(2)
#define QED_LINK_LOOPBACK_EXT			BIT(3)
#define QED_LINK_LOOPBACK_MAC			BIT(4)
	u32 loopback_mode;
};

struct qed_link_output {
	bool link_up;

	/* In QED_LM_* defs */
	u32 supported_caps;
	u32 advertised_caps;
	u32 lp_caps;

	u32 speed;		/* In Mb/s */
	u8 duplex;		/* In DUPLEX defs */
	u8 port;		/* In PORT defs */
	bool autoneg;
	u32 pause_config;
};

struct qed_probe_params {
	enum qed_protocol protocol;
	u32 dp_module;
	u8 dp_level;
	bool is_vf;
};

#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
	u32 int_mode;
	u8 drv_major;
	u8 drv_minor;
	u8 drv_rev;
	u8 drv_eng;
	u8 name[QED_DRV_VER_STR_SIZE];
};

#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

struct qed_int_info {
	struct msix_entry *msix;
	u8 msix_cnt;

	/* This should be updated by the protocol driver */
	u8 used_cnt;
};

struct qed_common_cb_ops {
	void (*link_update)(void *dev,
			    struct qed_link_output *link);
	void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
};

struct qed_selftest_ops {
/**
 * @brief selftest_interrupt - Perform interrupt test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_interrupt)(struct qed_dev *cdev);

/**
 * @brief selftest_memory - Perform memory test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_memory)(struct qed_dev *cdev);

/**
 * @brief selftest_register - Perform register test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_register)(struct qed_dev *cdev);

/**
 * @brief selftest_clock - Perform clock test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_clock)(struct qed_dev *cdev);

/**
 * @brief selftest_nvram - Perform nvram test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_nvram)(struct qed_dev *cdev);
};

struct qed_common_ops {
	struct qed_selftest_ops *selftest;

	struct qed_dev* (*probe)(struct pci_dev *dev,
				 struct qed_probe_params *params);

	void (*remove)(struct qed_dev *cdev);

	int (*set_power_state)(struct qed_dev *cdev,
			       pci_power_t state);

	void (*set_id)(struct qed_dev *cdev,
		       char name[],
		       char ver_str[]);

	/* Client drivers need to make this call before slowpath_start.
	 * The PF params required for the call are documented within the
	 * qed_pf_params structure definition.
	 */
	void (*update_pf_params)(struct qed_dev *cdev,
				 struct qed_pf_params *params);
	int (*slowpath_start)(struct qed_dev *cdev,
			      struct qed_slowpath_params *params);

	int (*slowpath_stop)(struct qed_dev *cdev);

	/* Requests to use `cnt' interrupts for fastpath.
	 * Upon success, returns the number of interrupts allocated for fastpath.
	 */
	int (*set_fp_int)(struct qed_dev *cdev,
			  u16 cnt);

	/* Fills `info' with pointers required for utilizing interrupts */
	int (*get_fp_int)(struct qed_dev *cdev,
			  struct qed_int_info *info);

	u32 (*sb_init)(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr,
		       u16 sb_id,
		       enum qed_sb_type type);

	u32 (*sb_release)(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id);

	void (*simd_handler_config)(struct qed_dev *cdev,
				    void *token,
				    int index,
				    void (*handler)(void *));

	void (*simd_handler_clean)(struct qed_dev *cdev,
				   int index);

	int (*dbg_grc)(struct qed_dev *cdev,
		       void *buffer, u32 *num_dumped_bytes);

	int (*dbg_grc_size)(struct qed_dev *cdev);

	int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);

	int (*dbg_all_data_size)(struct qed_dev *cdev);

/**
 * @brief can_link_change - can the instance change the link or not
 *
 * @param cdev
 *
 * @return true if link-change is allowed, false otherwise.
 */
	bool (*can_link_change)(struct qed_dev *cdev);

/**
 * @brief set_link - set links according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_link)(struct qed_dev *cdev,
			struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with current link configuration.
 */
	void (*get_link)(struct qed_dev *cdev,
			 struct qed_link_output *if_link);

/**
 * @brief - drains chip in case Tx completions fail to arrive due to pause.
 *
 * @param cdev
 */
	int (*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
	void (*update_msglvl)(struct qed_dev *cdev,
			      u32 dp_module,
			      u8 dp_level);

	int (*chain_alloc)(struct qed_dev *cdev,
			   enum qed_chain_use_mode intended_use,
			   enum qed_chain_mode mode,
			   enum qed_chain_cnt_type cnt_type,
			   u32 num_elems,
			   size_t elem_size,
			   struct qed_chain *p_chain);

	void (*chain_free)(struct qed_dev *cdev,
			   struct qed_chain *p_chain);

/**
 * @brief get_coalesce - Get coalesce parameters in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 *
 */
	void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);

/**
 * @brief set_coalesce - Configure Rx and Tx coalesce values in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 * @param qid - Queue index
 * @param sb_id - Status Block Id
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id);

/**
 * @brief set_led - Configure LED mode
 *
 * @param cdev
 * @param mode - LED mode
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_led)(struct qed_dev *cdev,
		       enum qed_led_mode mode);

/**
 * @brief update_drv_state - API to inform the change in the driver state.
 *
 * @param cdev
 * @param active
 *
 */
	int (*update_drv_state)(struct qed_dev *cdev, bool active);

/**
 * @brief update_mac - API to inform the change in the mac address
 *
 * @param cdev
 * @param mac
 *
 */
	int (*update_mac)(struct qed_dev *cdev, u8 *mac);

/**
 * @brief update_mtu - API to inform the change in the mtu
 *
 * @param cdev
 * @param mtu
 *
 */
	int (*update_mtu)(struct qed_dev *cdev, u16 mtu);

/**
 * @brief update_wol - update of changes in the WoL configuration
 *
 * @param cdev
 * @param enabled - true iff WoL should be enabled.
 */
	int (*update_wol)(struct qed_dev *cdev, bool enabled);
};

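/* Illustrative bring-up sketch (not part of this API; "common_ops", "pdev"
 * and the constants chosen below are assumptions): a protocol driver is
 * expected to invoke the common ops roughly in this order, with
 * update_pf_params always preceding slowpath_start as documented above.
 * Real consumers such as qede obtain the ops table from their protocol
 * specific qed_*_get_ops() helper.
 *
 *	struct qed_probe_params probe = {
 *		.protocol = QED_PROTOCOL_ETH,
 *		.is_vf = false,
 *	};
 *	struct qed_slowpath_params sp = { .int_mode = QED_INT_MODE_MSIX };
 *	struct qed_pf_params pf_params = {};
 *	struct qed_dev *cdev;
 *
 *	cdev = common_ops->probe(pdev, &probe);
 *	pf_params.eth_pf_params.num_cons = 64;
 *	common_ops->update_pf_params(cdev, &pf_params);
 *	common_ops->slowpath_start(cdev, &sp);
 *	...
 *	common_ops->slowpath_stop(cdev);
 *	common_ops->remove(cdev);
 */
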
#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

#define FIELD_VALUE(_name, _value) \
	((_value & _name ## _MASK) << _name ## _SHIFT)

#define SET_FIELD(value, name, flag)				       \
	do {							       \
		(value) &= ~(name ## _MASK << name ## _SHIFT);	       \
		(value) |= (((u64)flag) << (name ## _SHIFT));	       \
	} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)

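/* Usage sketch (illustrative only; FOO_ENABLE is a made-up field, real masks
 * and shifts come from the HSI headers): SET_FIELD clears the field and then
 * writes the new value, GET_FIELD extracts it back.
 *
 *	#define FOO_ENABLE_MASK		0x1
 *	#define FOO_ENABLE_SHIFT	3
 *
 *	u32 reg = 0;
 *
 *	SET_FIELD(reg, FOO_ENABLE, 1);		// reg is now 0x8
 *	if (GET_FIELD(reg, FOO_ENABLE))		// evaluates to 1
 *		...
 */
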
/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...)						\
	pr_err("[%s:%d(%s)]" fmt,					\
	       __func__, __LINE__,					\
	       DP_NAME(cdev) ? DP_NAME(cdev) : "",			\
	       ## __VA_ARGS__)

#define DP_NOTICE(cdev, fmt, ...)					\
	do {								\
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) {	\
			pr_notice("[%s:%d(%s)]" fmt,			\
				  __func__, __LINE__,			\
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
				  ## __VA_ARGS__);			\
		}							\
	} while (0)

#define DP_INFO(cdev, fmt, ...)						\
	do {								\
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {	\
			pr_notice("[%s:%d(%s)]" fmt,			\
				  __func__, __LINE__,			\
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
				  ## __VA_ARGS__);			\
		}							\
	} while (0)

#define DP_VERBOSE(cdev, module, fmt, ...)				\
	do {								\
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) &&	\
			     ((cdev)->dp_module & module))) {		\
			pr_notice("[%s:%d(%s)]" fmt,			\
				  __func__, __LINE__,			\
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
				  ## __VA_ARGS__);			\
		}							\
	} while (0)

enum DP_LEVEL {
	QED_LEVEL_VERBOSE	= 0x0,
	QED_LEVEL_INFO		= 0x1,
	QED_LEVEL_NOTICE	= 0x2,
	QED_LEVEL_ERR		= 0x3,
};

#define QED_LOG_LEVEL_SHIFT	(30)
#define QED_LOG_VERBOSE_MASK	(0x3fffffff)
#define QED_LOG_INFO_MASK	(0x40000000)
#define QED_LOG_NOTICE_MASK	(0x80000000)

enum DP_MODULE {
	QED_MSG_SPQ	= 0x10000,
	QED_MSG_STATS	= 0x20000,
	QED_MSG_DCB	= 0x40000,
	QED_MSG_IOV	= 0x80000,
	QED_MSG_SP	= 0x100000,
	QED_MSG_STORAGE	= 0x200000,
	QED_MSG_CXT	= 0x800000,
	QED_MSG_LL2	= 0x1000000,
	QED_MSG_ILT	= 0x2000000,
	QED_MSG_RDMA	= 0x4000000,
	QED_MSG_DEBUG	= 0x8000000,
	/* to be added...up to 0x8000000 */
};
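
/* Usage sketch (illustrative; requires a DP_NAME() definition and a device
 * structure carrying dp_level/dp_module, as the qed/qede devices provide):
 * DP_VERBOSE only prints when the verbose level is enabled and the given
 * module bit is set in dp_module, while DP_NOTICE is gated on level alone.
 *
 *	DP_VERBOSE(cdev, QED_MSG_SPQ, "posted SPQ entry, echo %u\n", echo);
 *	DP_NOTICE(cdev, "MFW rev 0x%08x\n", mfw_rev);
 */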

enum qed_mf_mode {
	QED_MF_DEFAULT,
	QED_MF_OVLAN,
	QED_MF_NPAR,
};

struct qed_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
};

struct qed_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct qed_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct qed_eth_stats {
	struct qed_eth_stats_common common;

	union {
		struct qed_eth_stats_bb bb;
		struct qed_eth_stats_ah ah;
	};
};

#define QED_SB_IDX	0x0002

#define RX_PI		0
#define TX_PI(tc)	(RX_PI + 1 + tc)

struct qed_sb_cnt_info {
	int sb_cnt;
	int sb_iov_cnt;
	int sb_free_blk;
};

static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= QED_SB_IDX;
	}

	/* Let SB update */
	mmiowb();
	return rc;
}

/**
 * @brief This function creates an update command for interrupts that is
 *        written to the IGU.
 *
 * @param sb_info - This is the structure allocated and
 *                  initialized per status block. Assumption is
 *                  that it was initialized using qed_sb_init
 * @param int_cmd - Enable/Disable/Nop
 * @param upd_flg - whether the igu consumer should be updated.
 *
 * @return inline void
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
			      enum igu_int_cmd int_cmd,
			      u8 upd_flg)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_REG <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * we need to guarantee all commands will be received (in-order) by HW.
	 */
	mmiowb();
	barrier();
}
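
/* Usage sketch (illustrative only; the handler below and its wiring are
 * assumptions modelled on how qede drives these helpers): an interrupt
 * handler disables the status block, deferred polling consumes completions,
 * and once all work is done the SB is re-armed with the IGU consumer updated.
 *
 *	static irqreturn_t my_sb_isr(int irq, void *dev_id)
 *	{
 *		struct qed_sb_info *sb_info = dev_id;
 *
 *		qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
 *		// ...schedule NAPI / deferred work...
 *		return IRQ_HANDLED;
 *	}
 *
 *	// in the poll routine, after all completions were handled:
 *	qed_sb_update_sb_idx(sb_info);
 *	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
 */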

static inline void __internal_ram_wr(void *p_hwfn,
				     void __iomem *addr,
				     int size,
				     u32 *data)
{
	unsigned int i;

	for (i = 0; i < size / sizeof(*data); i++)
		DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
}

static inline void internal_ram_wr(void __iomem *addr,
				   int size,
				   u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}

enum qed_rss_caps {
	QED_RSS_IPV4		= 0x1,
	QED_RSS_IPV6		= 0x2,
	QED_RSS_IPV4_TCP	= 0x4,
	QED_RSS_IPV6_TCP	= 0x8,
	QED_RSS_IPV4_UDP	= 0x10,
	QED_RSS_IPV6_UDP	= 0x20,
};

#define QED_RSS_IND_TABLE_SIZE	128
#define QED_RSS_KEY_SIZE	10 /* size in 32b chunks */
#endif