/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
#include <rte_time.h>

#define E1000_INTEL_VENDOR_ID 0x8086

/* Bit flags: link status needs updating, VF mailbox event pending. */
#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define E1000_FLAG_MAILBOX (uint32_t)(1 << 1)

/*
 * Defines that are not part of e1000_hw.h because they are not used by the
 * FreeBSD driver.
 */
#define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
#define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
#define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */
#define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
#define E1000_RXD_ERR_CKSUM_BIT 29
#define E1000_RXD_ERR_CKSUM_MSK 3
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128

#define IGB_MAX_RX_QUEUE_NUM 8
#define IGB_MAX_RX_QUEUE_NUM_82576 16

#define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
#define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */
#define E1000_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */

#define E1000_ETQF_ETHERTYPE 0x0000FFFF
#define E1000_ETQF_QUEUE 0x00070000
#define E1000_ETQF_QUEUE_SHIFT 16
#define E1000_MAX_ETQF_FILTERS 8

#define E1000_IMIR_DSTPORT 0x0000FFFF
#define E1000_IMIR_PRIORITY 0xE0000000
#define E1000_MAX_TTQF_FILTERS 8
#define E1000_2TUPLE_MAX_PRI 7

#define E1000_MAX_FLEX_FILTERS 8
#define E1000_MAX_FHFT 4
#define E1000_MAX_FHFT_EXT 4
#define E1000_FHFT_SIZE_IN_DWD 64
#define E1000_MAX_FLEX_FILTER_PRI 7
#define E1000_MAX_FLEX_FILTER_LEN 128
#define E1000_MAX_FLEX_FILTER_DWDS \
        (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
        (E1000_MAX_FLEX_FILTER_DWDS / 4)
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
#define E1000_FHFT_QUEUEING_OFFSET 0xFC
#define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8
#define E1000_FHFT_QUEUEING_PRIO_SHIFT 16
#define E1000_WUFC_FLEX_HQ 0x00004000

#define E1000_SPQF_SRCPORT 0x0000FFFF

#define E1000_MAX_FTQF_FILTERS 8
#define E1000_FTQF_PROTOCOL_MASK 0x000000FF
#define E1000_FTQF_5TUPLE_MASK_SHIFT 28
#define E1000_FTQF_QUEUE_MASK 0x03ff0000
#define E1000_FTQF_QUEUE_SHIFT 16
#define E1000_FTQF_QUEUE_ENABLE 0x00000100

#define IGB_RSS_OFFLOAD_ALL ( \
        ETH_RSS_IPV4 | \
        ETH_RSS_NONFRAG_IPV4_TCP | \
        ETH_RSS_NONFRAG_IPV4_UDP | \
        ETH_RSS_IPV6 | \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        ETH_RSS_IPV6_EX | \
        ETH_RSS_IPV6_TCP_EX | \
        ETH_RSS_IPV6_UDP_EX)

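/*
 * Illustrative sketch (not part of the upstream header): IGB_RSS_OFFLOAD_ALL
 * is the set of RSS hash types the igb PMD can enable. An application would
 * typically request it (or a subset) through the generic ethdev
 * configuration; the port_conf variable below is hypothetical.
 *
 *      struct rte_eth_conf port_conf = {
 *              .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *              .rx_adv_conf = {
 *                      .rss_conf = {
 *                              .rss_key = NULL,
 *                              .rss_hf = IGB_RSS_OFFLOAD_ALL,
 *                      },
 *              },
 *      };
 */
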
/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define E1000_MIN_RING_DESC 32
#define E1000_MAX_RING_DESC 4096

/*
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
 * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary
 * instead. This also helps with cache-line effects: the hardware supports
 * cache line sizes up to 128 bytes.
 */
#define E1000_ALIGN 128

#define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
#define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))

#define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc))
#define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc))

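/*
 * Illustrative sketch (not part of the upstream header): assuming the usual
 * 16-byte advanced descriptors, IGB_RXD_ALIGN evaluates to 128 / 16 = 8, so
 * a requested ring size is acceptable only when it is a multiple of 8 within
 * [E1000_MIN_RING_DESC, E1000_MAX_RING_DESC]. A queue-setup check along
 * these lines (nb_desc being the hypothetical requested count) would be:
 *
 *      if (nb_desc % IGB_RXD_ALIGN != 0 ||
 *          nb_desc > E1000_MAX_RING_DESC ||
 *          nb_desc < E1000_MIN_RING_DESC)
 *              return -EINVAL;
 */
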
#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET

/* Structure for interrupt-related data */
struct e1000_interrupt {
        uint32_t flags;
        uint32_t mask;
};

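/*
 * Illustrative sketch (not part of the upstream header): the flags field
 * carries the E1000_FLAG_* bits defined near the top of this file. With a
 * hypothetical intr pointer to the per-port structure (e.g. obtained via
 * E1000_DEV_PRIVATE_TO_INTR), the interrupt handler marks deferred work and
 * a follow-up handler consumes it:
 *
 *      intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 *      ...
 *      if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
 *              intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
 *              (refresh the link status here)
 *      }
 */
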
/* local vfta copy */
struct e1000_vfta {
        uint32_t vfta[IGB_VFTA_SIZE];
};

/*
 * VF data which is used by the PF host only
 */
#define E1000_MAX_VF_MC_ENTRIES 30
struct e1000_vf_info {
        uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
        uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
        uint16_t num_vf_mc_hashes;
        uint16_t default_vf_vlan_id;
        uint16_t vlans_enabled;
        uint16_t pf_qos;
        uint16_t vlan_count;
        uint16_t tx_rate;
};

TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter);

struct e1000_flex_filter_info {
        uint16_t len;
        uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */
        /* if mask bit is 1b, do not compare corresponding byte in dwords. */
        uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE];
        uint8_t priority;
};

/* Flex filter structure */
struct e1000_flex_filter {
        TAILQ_ENTRY(e1000_flex_filter) entries;
        uint16_t index; /* index of flex filter */
        struct e1000_flex_filter_info filter_info;
        uint16_t queue; /* rx queue assigned to */
};

TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter);
TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter);

struct e1000_5tuple_filter_info {
        uint32_t dst_ip;
        uint32_t src_ip;
        uint16_t dst_port;
        uint16_t src_port;
        uint8_t proto; /* l4 protocol. */
        /*
         * A packet that matches the 5-tuple above and has any of these TCP
         * flag bits set will hit this filter.
         */
        uint8_t tcp_flags;
        uint8_t priority; /* seven levels (001b-111b), 111b is highest,
                             used when more than one filter matches. */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

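/*
 * Illustrative sketch (not part of the upstream header): the mask bits above
 * select which 5-tuple fields are compared. For example, steering all TCP
 * traffic to destination port 80, regardless of addresses or source port,
 * could be described roughly as follows; the dst_port_be value and its byte
 * order are assumptions and must match whatever the filter-programming code
 * expects.
 *
 *      struct e1000_5tuple_filter_info info = {
 *              .dst_port = dst_port_be,  (hypothetical port value)
 *              .proto = IPPROTO_TCP,
 *              .priority = 1,
 *              .dst_ip_mask = 1,         (do not compare dst ip)
 *              .src_ip_mask = 1,         (do not compare src ip)
 *              .src_port_mask = 1,       (do not compare src port)
 *      };
 */
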
struct e1000_2tuple_filter_info {
        uint16_t dst_port;
        uint8_t proto; /* l4 protocol. */
        /*
         * A packet that matches the 2-tuple above and has any of these TCP
         * flag bits set will hit this filter.
         */
        uint8_t tcp_flags;
        uint8_t priority; /* seven levels (001b-111b), 111b is highest,
                             used when more than one filter matches. */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct e1000_5tuple_filter {
        TAILQ_ENTRY(e1000_5tuple_filter) entries;
        uint16_t index; /* the index of 5tuple filter */
        struct e1000_5tuple_filter_info filter_info;
        uint16_t queue; /* rx queue assigned to */
};

/* 2tuple filter structure */
struct e1000_2tuple_filter {
        TAILQ_ENTRY(e1000_2tuple_filter) entries;
        uint16_t index; /* the index of 2tuple filter */
        struct e1000_2tuple_filter_info filter_info;
        uint16_t queue; /* rx queue assigned to */
};

/*
 * Structure to store filters' info.
 */
struct e1000_filter_info {
        uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
        /* store used ethertype filters */
        uint16_t ethertype_filters[E1000_MAX_ETQF_FILTERS];
        uint8_t flex_mask; /* Bit mask for every used flex filter */
        struct e1000_flex_filter_list flex_list;
        /* Bit mask for every used 5tuple filter */
        uint8_t fivetuple_mask;
        struct e1000_5tuple_filter_list fivetuple_list;
        /* Bit mask for every used 2tuple filter */
        uint8_t twotuple_mask;
        struct e1000_2tuple_filter_list twotuple_list;
};

/*
 * Structure to store private data for each driver instance (for each port).
 */
struct e1000_adapter {
        struct e1000_hw hw;
        struct e1000_hw_stats stats;
        struct e1000_interrupt intr;
        struct e1000_vfta shadow_vfta;
        struct e1000_vf_info *vfdata;
        struct e1000_filter_info filter;
        bool stopped;
        struct rte_timecounter systime_tc;
        struct rte_timecounter rx_tstamp_tc;
        struct rte_timecounter tx_tstamp_tc;
};

#define E1000_DEV_PRIVATE(adapter) \
        ((struct e1000_adapter *)adapter)

#define E1000_DEV_PRIVATE_TO_HW(adapter) \
        (&((struct e1000_adapter *)adapter)->hw)

#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
        (&((struct e1000_adapter *)adapter)->stats)

#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
        (&((struct e1000_adapter *)adapter)->intr)

#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
        (&((struct e1000_adapter *)adapter)->shadow_vfta)

#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
        (&((struct e1000_adapter *)adapter)->vfdata)

#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
        (&((struct e1000_adapter *)adapter)->filter)

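/*
 * Illustrative sketch (not part of the upstream header): ethdev callbacks
 * normally recover the driver-private state from the generic device through
 * these macros; dev is a hypothetical struct rte_eth_dev pointer.
 *
 *      struct e1000_hw *hw =
 *              E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 *      struct e1000_filter_info *filter_info =
 *              E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 */
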
/*
 * RX/TX IGB function prototypes
 */
void eth_igb_tx_queue_release(void *txq);
void eth_igb_rx_queue_release(void *rxq);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);

int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);

uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
                uint16_t rx_queue_id);

int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);

int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);

int eth_igb_rx_init(struct rte_eth_dev *dev);

void eth_igb_tx_init(struct rte_eth_dev *dev);

uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

uint16_t eth_igb_recv_scattered_pkts(void *rxq,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf);

int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf);

int eth_igbvf_rx_init(struct rte_eth_dev *dev);

void eth_igbvf_tx_init(struct rte_eth_dev *dev);

/*
 * misc function prototypes
 */
void igb_pf_host_init(struct rte_eth_dev *eth_dev);

void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

/*
 * RX/TX EM function prototypes
 */
void eth_em_tx_queue_release(void *txq);
void eth_em_rx_queue_release(void *rxq);

void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);

int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);

uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
                uint16_t rx_queue_id);

int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);

int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);

int eth_em_rx_init(struct rte_eth_dev *dev);

void eth_em_tx_init(struct rte_eth_dev *dev);

uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

void igb_pf_host_uninit(struct rte_eth_dev *dev);

#endif /* _E1000_ETHDEV_H_ */