/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */

#ifndef __HCLGEVF_MAIN_H
#define __HCLGEVF_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"

#define HCLGEVF_MOD_VERSION "v1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"

#define HCLGEVF_ROCEE_VECTOR_NUM 0
#define HCLGEVF_MISC_VECTOR_NUM 0

#define HCLGEVF_INVALID_VPORT 0xffff

/* The actual number depends on the total number of VFs created by
 * the physical function, but the maximum number of vectors per VF
 * is {VFn(1-32), VECTn(32 + 1)}.
 */
#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1)

#define HCLGEVF_VECTOR_REG_BASE 0x20000
#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400
#define HCLGEVF_VECTOR_REG_OFFSET 0x4
#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
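
/* Illustrative sketch (hypothetical helper, not part of the original
 * header): the per-vector interrupt control registers are assumed to be
 * laid out linearly from HCLGEVF_VECTOR_REG_BASE in steps of
 * HCLGEVF_VECTOR_REG_OFFSET, so a vector's register offset within the
 * VF's register space could be derived as follows.
 */
static inline u32 hclgevf_example_vector_reg_offset(int vector_idx)
{
	return HCLGEVF_VECTOR_REG_BASE +
	       vector_idx * HCLGEVF_VECTOR_REG_OFFSET;
}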

/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1

#define HCLGEVF_TQP_RESET_TRY_TIMES 10
/* Reset related Registers */
#define HCLGEVF_FUN_RST_ING 0x20C00
#define HCLGEVF_FUN_RST_ING_B 0

#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
#define HCLGEVF_RSS_KEY_SIZE 40
#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
#define HCLGEVF_RSS_CFG_TBL_NUM \
	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
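/* HCLGEVF_RSS_CFG_TBL_SIZE comes from hclgevf_cmd.h and gives the number
 * of indirection-table entries carried per RSS configuration command;
 * assuming a value of 16, the 512-entry table is programmed in
 * 512 / 16 = 32 commands.
 */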

/* states of hclgevf device & tasks */
enum hclgevf_states {
	/* device states */
	HCLGEVF_STATE_DOWN,
	HCLGEVF_STATE_DISABLED,
	/* task states */
	HCLGEVF_STATE_SERVICE_SCHED,
	HCLGEVF_STATE_RST_SERVICE_SCHED,
	HCLGEVF_STATE_RST_HANDLING,
	HCLGEVF_STATE_MBX_SERVICE_SCHED,
	HCLGEVF_STATE_MBX_HANDLING,
};
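
/* These enumerators are used as bit numbers in the atomic state bitmap of
 * struct hclgevf_dev (the "unsigned long state" field below), manipulated
 * with set_bit()/test_bit()/clear_bit(); the reset helpers near the end of
 * this header test HCLGEVF_STATE_RST_HANDLING in exactly this way.
 */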

#define HCLGEVF_MPF_ENBALE 1

struct hclgevf_mac {
	u8 mac_addr[ETH_ALEN];
	int link;
	u8 duplex;
	u32 speed;
};

struct hclgevf_hw {
	void __iomem *io_base;
	int num_vec;
	struct hclgevf_cmq cmq;
	struct hclgevf_mac mac;
	void *hdev; /* hclgevf device it is part of */
};

/* TQP stats */
struct hlcgevf_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclgevf_tqp {
	struct device *dev; /* device for DMA mapping */
	struct hnae3_queue q;
	struct hlcgevf_tqp_stats tqp_stats;
	u16 index; /* global index in a NIC controller */

	bool alloced;
};

struct hclgevf_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u32 numa_node_map;
};

struct hclgevf_rss_cfg {
	u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
	u32 hash_algo;
	u32 rss_size;
	u8 hw_tc_map;
	u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
};
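
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * shadow indirection table above is typically filled by spreading entries
 * round-robin across the active RSS queues before being flushed to
 * hardware, roughly as follows.
 */
static inline void hclgevf_example_fill_ind_tbl(struct hclgevf_rss_cfg *rss_cfg)
{
	u32 i;

	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}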

struct hclgevf_misc_vector {
	u8 __iomem *addr;
	int vector_irq;
};

struct hclgevf_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclgevf_hw hw;
	struct hclgevf_misc_vector misc_vector;
	struct hclgevf_rss_cfg rss_cfg;
	unsigned long state;

#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
	unsigned long reset_state; /* requested, pending */
	u32 reset_attempts;

	u32 fw_version;
	u16 num_tqps; /* num task queue pairs of this VF */

	u16 alloc_rss_size; /* allocated RSS task queue */
	u16 rss_size_max; /* HW defined max RSS task queue */

	u16 num_alloc_vport; /* num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;

	bool accept_mta_mc; /* whether to accept mta filter multicast */
	bool mbx_event_pending;
	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
	struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */

	struct timer_list service_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	struct hclgevf_tqp *htqp;

	struct hnae3_handle nic;
	struct hnae3_handle roce;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;
	u32 flag;
};

static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
{
	return (hdev &&
		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
		(hdev->nic.reset_level == HNAE3_VF_RESET));
}

static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
{
	return (hdev &&
		(test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
		(hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
}

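/* Usage sketch (illustrative only): callers that must not touch the
 * hardware while a VF reset is being serviced can gate their work on the
 * helpers above, e.g.:
 *
 *	if (hclgevf_dev_ongoing_reset(hdev))
 *		return;
 */
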
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
			 const u8 *msg_data, u8 msg_len, bool need_resp,
			 u8 *resp_data, u16 resp_len);
void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
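
/* Illustrative usage sketch (hypothetical helper, not part of the driver):
 * hclgevf_send_mbx_msg() sends a request to the PF over the VF-to-PF
 * mailbox and, when need_resp is true, copies the PF's reply into
 * resp_data. The message code 0 below is a placeholder, not a real
 * HCLGE_MBX_* opcode from hclge_mbx.h.
 */
static inline int hclgevf_example_query_pf(struct hclgevf_dev *hdev,
					   u8 *resp_data, u16 resp_len)
{
	return hclgevf_send_mbx_msg(hdev, 0, 0, NULL, 0, true,
				    resp_data, resp_len);
}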
#endif