]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/drivers/net/qede/base/ecore_l2_api.h
575b9e3a5acdb9fb2e64f0a5b017b6e13a388bbf
[ceph.git] / ceph / src / spdk / dpdk / drivers / net / qede / base / ecore_l2_api.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
3 * All rights reserved.
4 * www.cavium.com
5 */
6
7 #ifndef __ECORE_L2_API_H__
8 #define __ECORE_L2_API_H__
9
10 #include "ecore_status.h"
11 #include "ecore_sp_api.h"
12 #include "ecore_int_api.h"
13
#ifndef __EXTRACT__LINUX__
/* Bitmask of header-field combinations that may participate in the
 * RSS hash calculation; values are OR-able.
 */
enum ecore_rss_caps {
	ECORE_RSS_IPV4 = 0x1,
	ECORE_RSS_IPV6 = 0x2,
	ECORE_RSS_IPV4_TCP = 0x4,
	ECORE_RSS_IPV6_TCP = 0x8,
	ECORE_RSS_IPV4_UDP = 0x10,
	ECORE_RSS_IPV6_UDP = 0x20,
};

/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif
28
/* Parameters shared by the Rx and Tx queue start ramrods. */
struct ecore_queue_start_common_params {
	/* Should always be relative to entity sending this. */
	u8 vport_id;
	u16 queue_id;

	/* Relative, but relevant only for PFs */
	u8 stats_id;

	/* Status block used by this queue */
	struct ecore_sb_info *p_sb;
	u8 sb_idx; /* index within the status block - TODO confirm */
};
40
/* Outputs filled by ecore_eth_rx_queue_start(). */
struct ecore_rxq_start_ret_params {
	void OSAL_IOMEM *p_prod; /* mapped producer-update address */
	void *p_handle;		 /* opaque handle; pass to stop/update calls */
};

/* Outputs filled by ecore_eth_tx_queue_start(). */
struct ecore_txq_start_ret_params {
	void OSAL_IOMEM *p_doorbell; /* mapped doorbell address */
	void *p_handle;		     /* opaque handle; pass to stop calls */
};
50
/* RSS configuration. The update_* flags select which of the remaining
 * fields are applied; fields whose flag is clear are ignored.
 */
struct ecore_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps; /* combination of enum ecore_rss_caps bits */
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */

	/* Indirection table consists of rx queue handles */
	void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
	u32 rss_key[ECORE_RSS_KEY_SIZE];
};
65
/* TPA (aggregation) tuning for a vport. Each update_* flag gates the
 * group of fields that follows it.
 */
struct ecore_sge_tpa_params {
	u8 max_buffers_per_cqe;

	/* Enable/disable flags - applied when update_tpa_en_flg is set */
	u8 update_tpa_en_flg;
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	/* Aggregation parameters - applied when update_tpa_param_flg is set */
	u8 update_tpa_param_flg;
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};
84
/* Operation requested of a unicast/multicast filter command. */
enum ecore_filter_opcode {
	ECORE_FILTER_ADD,
	ECORE_FILTER_REMOVE,
	ECORE_FILTER_MOVE,
	ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
	ECORE_FILTER_FLUSH, /* Removes all filters */
};

/* Which fields of a unicast filter command are meaningful. */
enum ecore_filter_ucast_type {
	ECORE_FILTER_MAC,
	ECORE_FILTER_VLAN,
	ECORE_FILTER_MAC_VLAN,
	ECORE_FILTER_INNER_MAC,
	ECORE_FILTER_INNER_VLAN,
	ECORE_FILTER_INNER_PAIR,
	ECORE_FILTER_INNER_MAC_VNI_PAIR,
	ECORE_FILTER_MAC_VNI_PAIR,
	ECORE_FILTER_VNI,
	ECORE_FILTER_UNUSED, /* @DPDK */
};
105
/* A single unicast filter command; which of mac/vlan/vni apply is
 * determined by 'type'.
 */
struct ecore_filter_ucast {
	enum ecore_filter_opcode opcode;
	enum ecore_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from; /* used by MOVE/REMOVE opcodes */
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

/* A multicast filter command carrying up to ECORE_MAX_MC_ADDRS MACs. */
struct ecore_filter_mcast {
	/* MOVE is not supported for multicast */
	enum ecore_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs; /* number of valid entries in mac[] */
#define ECORE_MAX_MC_ADDRS 64
	unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
};
128
/* Rx/Tx accept-mode configuration. The *_accept_filter fields are
 * bitmasks built from the ECORE_ACCEPT_* defines below; each direction
 * is applied only when its update_*_mode_config flag is set.
 */
struct ecore_filter_accept_flags {
	u8 update_rx_mode_config;
	u8 update_tx_mode_config;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
#define ECORE_ACCEPT_NONE 0x01
#define ECORE_ACCEPT_UCAST_MATCHED 0x02
#define ECORE_ACCEPT_UCAST_UNMATCHED 0x04
#define ECORE_ACCEPT_MCAST_MATCHED 0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
#define ECORE_ACCEPT_BCAST 0x20
};

/* Selects which protocols/address-families aRFS classifies on. */
struct ecore_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	bool arfs_enable; /* Enable or disable arfs mode */
};
149
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
 * FW will assert in the following cases, so driver should take care...:
 * 1. Adding a filter to a full table.
 * 2. Adding a filter which already exists on that vport.
 * 3. Removing a filter which doesn't exist.
 */

enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);

/* Add / remove / move multicast MAC filters. */
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data);

/* Set "accept" filters (rx/tx mode) for a vport. p_comp_data is only
 * used with ECORE_SPQ_MODE_CB completion mode - TODO confirm.
 */
enum _ecore_status_t
ecore_filter_accept_cmd(
	struct ecore_dev *p_dev,
	u8 vport,
	struct ecore_filter_accept_flags accept_flags,
	u8 update_accept_any_vlan,
	u8 accept_any_vlan,
	enum spq_mode comp_mode,
	struct ecore_spq_comp_cb *p_comp_data);
180
/**
 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
 *
 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
 * the VPort ID is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params Inputs; Relative for PF [SB being an exception]
 * @param bd_max_bytes Maximum bytes that can be placed on a BD
 * @param bd_chain_phys_addr Physical address of BDs for receive.
 * @param cqe_pbl_addr Physical address of the CQE PBL Table.
 * @param cqe_pbl_size Size of the CQE PBL Table
 * @param p_ret_params Pointer to struct to be filled with outputs.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params);
207
/**
 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
 * @param p_rxq Handle of queue to close (p_handle from start ramrod)
 * @param eq_completion_only If True completion will be on
 *			     EQe, if False completion will be
 *			     on EQe if p_hwfn opaque
 *			     different from the RXQ opaque
 *			     otherwise on CQe.
 * @param cqe_completion If True completion will be
 *			 received on CQe.
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			void *p_rxq,
			bool eq_completion_only,
			bool cqe_completion);
227
/**
 * @brief - TX Queue Start Ramrod
 *
 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
 * the VPort is not currently initialized.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param p_params
 * @param tc traffic class to use with this L2 txq
 * @param pbl_addr address of the pbl array
 * @param pbl_size number of entries in pbl
 * @param p_ret_params Pointer to fill the return parameters in.
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params);

/**
 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
 * @param p_txq - handle to Tx queue needed to be closed
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_txq);
263
/* TPA aggregation mode selected at vport start. */
enum ecore_tpa_mode {
	ECORE_TPA_MODE_NONE,
	ECORE_TPA_MODE_RSC,
	ECORE_TPA_MODE_GRO,
	ECORE_TPA_MODE_MAX
};

/* Inputs to ecore_sp_vport_start(). */
struct ecore_sp_vport_start_params {
	enum ecore_tpa_mode tpa_mode;
	bool remove_inner_vlan;	/* Inner VLAN removal is enabled */
	bool tx_switching;	/* Vport supports tx-switching */
	bool handle_ptp_pkts;	/* Handle PTP packets */
	bool only_untagged;	/* Untagged pkt control */
	bool drop_ttl0;		/* Drop packets with TTL = 0 */
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;		/* VPORT ID */
	u16 mtu;		/* VPORT MTU */
	bool zero_placement_offset;
	bool check_mac;
	bool check_ethtype;

	/* Strict behavior on transmission errors */
	bool b_err_illegal_vlan_mode;
	bool b_err_illegal_inband_mode;
	bool b_err_vlan_insert_with_inband;
	bool b_err_small_pkt;
	bool b_err_big_pkt;
	bool b_err_anti_spoof;
	bool b_err_ctrl_frame;
};
296
/**
 * @brief ecore_sp_vport_start -
 *
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
 * of the VPort is not enabled.
 *
 * @param p_hwfn
 * @param p_params VPORT start params
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params);
311
/* Inputs to ecore_sp_vport_update(). Fields come in update_<x>_flg /
 * <x> pairs; a value is applied only when its update flag is set.
 */
struct ecore_sp_vport_update_params {
	u16 opaque_fid;
	u8 vport_id;
	u8 update_vport_active_rx_flg;
	u8 vport_active_rx_flg;
	u8 update_vport_active_tx_flg;
	u8 vport_active_tx_flg;
	u8 update_inner_vlan_removal_flg;
	u8 inner_vlan_removal_flg;
	u8 silent_vlan_removal_flg;
	u8 update_default_vlan_enable_flg;
	u8 default_vlan_enable_flg;
	u8 update_default_vlan_flg;
	u16 default_vlan;
	u8 update_tx_switching_flg;
	u8 tx_switching_flg;
	u8 update_approx_mcast_flg;
	u8 update_anti_spoofing_en_flg;
	u8 anti_spoofing_en;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;
	u32 bins[8]; /* presumably approximate-mcast bins; verify vs FW HSI */
	struct ecore_rss_params *rss_params; /* OSAL_NULL to leave unchanged */
	struct ecore_filter_accept_flags accept_flags;
	struct ecore_sge_tpa_params *sge_tpa_params; /* OSAL_NULL to skip */
	/* MTU change - notice this requires the vport to be disabled.
	 * If non-zero, value would be used.
	 */
	u16 mtu;
};
342
/**
 * @brief ecore_sp_vport_update -
 *
 * This ramrod updates the parameters of the VPort. Every field can be updated
 * independently, according to flags.
 *
 * This ramrod is also used to set the VPort state to active after creation.
 * An Assert is generated if the VPort does not contain an RX queue.
 *
 * @param p_hwfn
 * @param p_params
 * @param comp_mode completion mode for the slow-path queue
 * @param p_comp_data completion cookie/callback
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data);
/**
 * @brief ecore_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id);
377
/* Per-hwfn variant of the unicast filter command; opaque_fid selects
 * the entity the filter is configured for.
 */
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data);

/**
 * @brief ecore_sp_rx_eth_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note Final phase API.
 *
 * @param p_hwfn
 * @param pp_rxq_handlers An array of queue handlers to be updated.
 * @param num_rxqs number of queues to update.
 * @param complete_cqe_flg Post completion to the CQE Ring if set
 * @param complete_event_flg Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return enum _ecore_status_t
 */

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      void **pp_rxq_handlers,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data);
412
/* Read statistics for a single bin of one hwfn into 'stats';
 * optionally includes port statistics.
 */
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats);

/* Read aggregated vport statistics for the whole device into 'stats'. */
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);

/* Reset the device's vport statistics counters. */
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
422
/**
 * @brief ecore_arfs_mode_configure -
 *
 * Enable or disable rfs mode. It must accept at least one of tcp or udp true
 * and at least one of ipv4 or ipv6 true to enable rfs mode.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_cfg_params arfs mode configuration parameters.
 *
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params);
437
/**
 * @brief - ecore_configure_rfs_ntuple_filter
 *
 * This ramrod should be used to add or remove arfs hw filter
 *
 * @param p_hwfn
 * @param p_cb Used for ECORE_SPQ_MODE_CB, where client would initialize
 *	       it with cookie and callback function address, if not
 *	       using this mode then client must pass NULL.
 * @param p_addr p_addr is an actual packet header that needs to be
 *		 filtered. It has to be mapped with IO to read prior to
 *		 calling this, [contains 4 tuples- src ip, dest ip,
 *		 src port, dest port].
 * @param length length of p_addr header up to past the transport header.
 * @param qid receive packet will be directed to this queue.
 * @param vport_id
 * @param b_is_add flag to add or remove filter.
 *
 */
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_spq_comp_cb *p_cb,
				  dma_addr_t p_addr, u16 length,
				  u16 qid, u8 vport_id,
				  bool b_is_add);
#endif /* __ECORE_L2_API_H__ */
463 #endif