/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>

#define QEDE_MAJOR_VERSION		8
#define QEDE_MINOR_VERSION		10
#define QEDE_REVISION_VERSION		9
#define QEDE_ENGINEERING_VERSION	20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
	__stringify(QEDE_MINOR_VERSION) "." \
	__stringify(QEDE_REVISION_VERSION) "." \
	__stringify(QEDE_ENGINEERING_VERSION)
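
/* Note (illustrative, not in the original header): with the version values
 * defined above, __stringify() pastes DRV_MODULE_VERSION together into the
 * string literal "8.10.9.20".
 */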

#define DRV_MODULE_SYM qede

struct qede_stats {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 coalesced_pkts;
	u64 coalesced_events;
	u64 coalesced_aborts_num;
	u64 non_coalesced_pkts;
	u64 coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
	u64 brb_truncates;
	u64 brb_discards;
	u64 tx_mac_ctrl_frames;
};

struct qede_vlan {
	struct list_head list;
	u16 vid;
	bool configured;
};

struct qede_rdma_dev {
	struct qedr_dev *qedr_dev;
	struct list_head entry;
	struct list_head roce_event_list;
	struct workqueue_struct *roce_wq;
};

struct qede_dev {
	struct qed_dev *cdev;
	struct net_device *ndev;
	struct pci_dev *pdev;

	u32 dp_module;
	u8 dp_level;

	u32 flags;
#define QEDE_FLAG_IS_VF		BIT(0)
#define IS_VF(edev)		(!!((edev)->flags & QEDE_FLAG_IS_VF))

	const struct qed_eth_ops *ops;

	struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev)	((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev)	((edev)->dev_info.num_queues * \
				 (edev)->dev_info.num_tc)

	struct qede_fastpath *fp_array;
	u8 req_num_tx;
	u8 fp_num_tx;
	u8 req_num_rx;
	u8 fp_num_rx;
	u16 req_queues;
	u16 num_queues;
	u8 num_tc;
#define QEDE_QUEUE_CNT(edev)	((edev)->num_queues)
#define QEDE_RSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_TSS_COUNT(edev)	(((edev)->num_queues - (edev)->fp_num_rx) * \
				 (edev)->num_tc)
#define QEDE_TX_IDX(edev, txqidx)	((edev)->fp_num_rx + (txqidx) % \
					 QEDE_TSS_COUNT(edev))
#define QEDE_TC_IDX(edev, txqidx)	((txqidx) / QEDE_TSS_COUNT(edev))
#define QEDE_TX_QUEUE(edev, txqidx)	\
	(&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
							(edev), (txqidx))])
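
/* Worked example (illustrative, not in the original header): with
 * fp_num_rx = 4, num_queues = 8 and num_tc = 1, QEDE_TSS_COUNT() is 4.
 * For txqidx = 2, QEDE_TX_IDX() yields 4 + (2 % 4) = 6 and QEDE_TC_IDX()
 * yields 2 / 4 = 0, so QEDE_TX_QUEUE(edev, 2) resolves to
 * &edev->fp_array[6].txqs[0]; Tx queues land on the fastpath entries that
 * follow the Rx-only ones.
 */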

	struct qed_int_info int_info;
	unsigned char primary_mac[ETH_ALEN];

	/* Smaller private variant of the RTNL lock */
	struct mutex qede_lock;
	u32 state; /* Protected by qede_lock */
	u16 rx_buf_size;
	u32 rx_copybreak;

	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD	(ETH_HLEN + 8 + 8)
	/* Max supported alignment is 256 (8 shift)
	 * minimal alignment shift 6 is optimal for 57xxx HW performance
	 */
#define QEDE_RX_ALIGN_SHIFT	max(6, min(8, L1_CACHE_SHIFT))
	/* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
	 * at the end of skb->data, to avoid wasting a full cache line.
	 * This reduces memory use (skb->truesize).
	 */
#define QEDE_FW_RX_ALIGN_END					\
	max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,			\
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
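
/* Worked example (illustrative, not in the original header): on a 64-bit
 * build with L1_CACHE_SHIFT = 6, QEDE_RX_ALIGN_SHIFT is 6, i.e. 64-byte
 * alignment. sizeof(struct skb_shared_info) is a few hundred bytes on such
 * configs, so SKB_DATA_ALIGN() of it dominates and QEDE_FW_RX_ALIGN_END
 * reserves that amount of tailroom rather than just 64 bytes.
 */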

	struct qede_stats stats;
#define QEDE_RSS_INDIR_INITED	BIT(0)
#define QEDE_RSS_KEY_INITED	BIT(1)
#define QEDE_RSS_CAPS_INITED	BIT(2)
	u32 rss_params_inited; /* bit-field to track initialized rss params */
	struct qed_update_vport_rss_params rss_params;
	u16 q_num_rx_buffers; /* Must be a power of two */
	u16 q_num_tx_buffers; /* Must be a power of two */

	bool gro_disable;
	struct list_head vlan_list;
	u16 configured_vlans;
	u16 non_configured_vlans;
	bool accept_any_vlan;
	struct delayed_work sp_task;
	unsigned long sp_flags;
	u16 vxlan_dst_port;
	u16 geneve_dst_port;

	struct qede_rdma_dev rdma_info;
};

enum QEDE_STATE {
	QEDE_STATE_CLOSED,
	QEDE_STATE_OPEN,
};

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
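
/* Example (illustrative, not in the original header):
 * HILO_U64(0x00000001, 0x00000002) evaluates to 0x0000000100000002ULL;
 * the driver uses this to rebuild 64-bit DMA addresses from the hi/lo
 * 32-bit halves stored in hardware descriptors.
 */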

#define MAX_NUM_TC	8
#define MAX_NUM_PRI	8

/* The driver supports the new build_skb() API:
 * the RX ring buffer contains a pointer to the data only,
 * skbs are built only after the frame has been DMA-ed.
 */
struct sw_rx_data {
	struct page *data;
	dma_addr_t mapping;
	unsigned int page_offset;
};

enum qede_agg_state {
	QEDE_AGG_STATE_NONE  = 0,
	QEDE_AGG_STATE_START = 1,
	QEDE_AGG_STATE_ERROR = 2
};

struct qede_agg_info {
	struct sw_rx_data replace_buf;
	dma_addr_t replace_buf_mapping;
	struct sw_rx_data start_buf;
	dma_addr_t start_buf_mapping;
	struct eth_fast_path_rx_tpa_start_cqe start_cqe;
	enum qede_agg_state agg_state;
	struct sk_buff *skb;
	int frag_id;
	u16 vlan_tag;
};

struct qede_rx_queue {
	__le16 *hw_cons_ptr;
	struct sw_rx_data *sw_rx_ring;
	u16 sw_rx_cons;
	u16 sw_rx_prod;
	struct qed_chain rx_bd_ring;
	struct qed_chain rx_comp_ring;
	void __iomem *hw_rxq_prod_addr;

	/* GRO */
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

	int rx_buf_size;
	unsigned int rx_buf_seg_size;

	u16 num_rx_buffers;
	u16 rxq_id;

	u64 rcv_pkts;
	u64 rx_hw_errors;
	u64 rx_alloc_errors;
	u64 rx_ip_frags;
};

union db_prod {
	struct eth_db_data data;
	u32 raw;
};

struct sw_tx_bd {
	struct sk_buff *skb;
	u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define QEDE_TSO_SPLIT_BD	BIT(0)
};

struct qede_tx_queue {
	int index; /* Queue index */
	__le16 *hw_cons_ptr;
	struct sw_tx_bd *sw_tx_ring;
	u16 sw_tx_cons;
	u16 sw_tx_prod;
	struct qed_chain tx_pbl;
	void __iomem *doorbell_addr;
	union db_prod tx_db;

	u16 num_tx_buffers;
	u64 xmit_pkts;
	u64 stopped_cnt;

	bool is_legacy;
};

#define BD_UNMAP_ADDR(bd)	HILO_U64(le32_to_cpu((bd)->addr.hi), \
					 le32_to_cpu((bd)->addr.lo))
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)				\
	do {								\
		(bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));	\
		(bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));	\
		(bd)->nbytes = cpu_to_le16(len);			\
	} while (0)
#define BD_UNMAP_LEN(bd)	(le16_to_cpu((bd)->nbytes))
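
/* Usage sketch (illustrative, not in the original header): when a Tx BD is
 * completed or aborted, the driver can recover the DMA mapping it stored
 * with BD_SET_UNMAP_ADDR_LEN() and release it, roughly:
 *
 *	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
 *			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
 *
 * where first_bd is a descriptor previously filled via
 * BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, len).
 */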

struct qede_fastpath {
	struct qede_dev *edev;
#define QEDE_FASTPATH_TX	BIT(0)
#define QEDE_FASTPATH_RX	BIT(1)
#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
	u8 type;
	u8 id;
	struct napi_struct napi;
	struct qed_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txqs;

#define VEC_NAME_SIZE	(sizeof(((struct net_device *)0)->name) + 8)
	char name[VEC_NAME_SIZE];
};

/* Debug print definitions */
#define DP_NAME(edev)	((edev)->ndev->name)

#define XMIT_PLAIN		0
#define XMIT_L4_CSUM		BIT(0)
#define XMIT_LSO		BIT(1)
#define XMIT_ENC		BIT(2)
#define XMIT_ENC_GSO_L4_CSUM	BIT(3)

#define QEDE_CSUM_ERROR			BIT(0)
#define QEDE_CSUM_UNNECESSARY		BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY	BIT(2)

#define QEDE_SP_RX_MODE			1
#define QEDE_SP_VXLAN_PORT_CONFIG	2
#define QEDE_SP_GENEVE_PORT_CONFIG	3

union qede_reload_args {
	u16 mtu;
};

#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
#endif
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *edev,
			      union qede_reload_args *args),
		 union qede_reload_args *args);
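/* Usage sketch (illustrative, not in the original header): qede_reload()
 * quiesces the device, invokes the callback, and brings the device back up.
 * A hypothetical caller changing the MTU could do something like:
 *
 *	static void qede_example_set_mtu(struct qede_dev *edev,
 *					 union qede_reload_args *args)
 *	{
 *		edev->ndev->mtu = args->mtu;
 *	}
 *
 *	union qede_reload_args args = { .mtu = new_mtu };
 *
 *	qede_reload(edev, qede_example_set_mtu, &args);
 *
 * qede_example_set_mtu and new_mtu are names invented for this sketch.
 */
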
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
			     u8 count);

#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN		128
#define NUM_RX_BDS_DEF		NUM_RX_BDS_MAX

#define TX_RING_SIZE_POW	13
#define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN		128
#define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX
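
/* For reference (illustrative, not in the original header): with both
 * ring-size powers set to 13, RX_RING_SIZE and TX_RING_SIZE are 8192 BDs,
 * so the default BD counts (NUM_RX_BDS_DEF / NUM_TX_BDS_DEF) are 8191,
 * one less than the ring size, the common convention that keeps a full
 * ring distinguishable from an empty one.
 */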

#define QEDE_MIN_PKT_LEN		64
#define QEDE_RX_HDR_SIZE		256
#define QEDE_MAX_JUMBO_PACKET_SIZE	9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
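
/* Illustrative sketch (not part of the original header): for_each_queue()
 * expects a variable named 'edev' in scope, since the macro expands to a
 * loop over edev->num_queues. A minimal example of walking the fastpath
 * array with it:
 */
static inline int qede_example_count_rx_fps(struct qede_dev *edev)
{
	int i, cnt = 0;

	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_RX)
			cnt++;
	}

	return cnt;
}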

#endif /* _QEDE_H_ */