/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_DEV_API_H
#define _QED_DEV_API_H

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed_int.h"

/**
 * @brief qed_init_dp - initialize the debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
void qed_init_dp(struct qed_dev *cdev,
		 u32 dp_module,
		 u8 dp_level);

/**
 * @brief qed_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param cdev
 */
void qed_init_struct(struct qed_dev *cdev);

/**
 * @brief qed_resc_free - free device resources
 *
 * @param cdev
 */
void qed_resc_free(struct qed_dev *cdev);

/**
 * @brief qed_resc_alloc - allocate device resources
 *
 * @param cdev
 *
 * @return int
 */
int qed_resc_alloc(struct qed_dev *cdev);

/**
 * @brief qed_resc_setup - setup device resources
 *
 * @param cdev
 */
void qed_resc_setup(struct qed_dev *cdev);

enum qed_override_force_load {
	QED_OVERRIDE_FORCE_LOAD_NONE,
	QED_OVERRIDE_FORCE_LOAD_ALWAYS,
	QED_OVERRIDE_FORCE_LOAD_NEVER,
};

struct qed_drv_load_params {
	/* Indicates whether the driver is running over a crash kernel.
	 * As part of the load request, this will be used for providing the
	 * driver role to the MFW.
	 * In case of a crash kernel over PDA - this should be set to false.
	 */
	bool is_crash_kernel;

	/* The timeout value that the MFW should use when locking the engine for
	 * the driver load process.
	 * A value of '0' means the default value, and '255' means no timeout.
	 */
	u8 mfw_timeout_val;
#define QED_LOAD_REQ_LOCK_TO_DEFAULT	0
#define QED_LOAD_REQ_LOCK_TO_NONE	255

	/* Avoid engine reset when first PF loads on it */
	bool avoid_eng_reset;

	/* Allow overriding the default force load behavior */
	enum qed_override_force_load override_force_load;
};

struct qed_hw_init_params {
	/* Tunneling parameters */
	struct qed_tunnel_info *p_tunn;

	bool b_hw_start;

	/* Interrupt mode [msix, inta, etc.] to use */
	enum qed_int_mode int_mode;

	/* NPAR tx switching to be used for vports for tx-switching */
	bool allow_npar_tx_switch;

	/* Binary fw data pointer in binary fw file */
	const u8 *bin_fw_data;

	/* Driver load parameters */
	struct qed_drv_load_params *p_drv_load_params;
};

/**
 * @brief qed_hw_init - initialize the device HW
 *
 * @param cdev
 * @param p_params
 *
 * @return int
 */
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);

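/* A minimal usage sketch (illustrative only, not the driver's actual probe
 * flow): filling the load/init parameter structs above and calling
 * qed_hw_init(). 'fw_data' is a placeholder for the binary firmware pointer,
 * QED_INT_MODE_MSIX comes from qed_if.h, and error handling is omitted.
 *
 *	struct qed_drv_load_params load_params = {
 *		.is_crash_kernel = is_kdump_kernel(),
 *		.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT,
 *		.avoid_eng_reset = false,
 *		.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE,
 *	};
 *	struct qed_hw_init_params init_params = {
 *		.p_tunn = NULL,
 *		.b_hw_start = true,
 *		.int_mode = QED_INT_MODE_MSIX,
 *		.allow_npar_tx_switch = true,
 *		.bin_fw_data = fw_data,
 *		.p_drv_load_params = &load_params,
 *	};
 *	int rc = qed_hw_init(cdev, &init_params);
 */
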
/**
 * @brief qed_hw_timers_stop_all - stop the timers HW block
 *
 * @param cdev
 *
 * @return void
 */
void qed_hw_timers_stop_all(struct qed_dev *cdev);

/**
 * @brief qed_hw_stop - stop the device HW
 *
 * @param cdev
 *
 * @return int
 */
int qed_hw_stop(struct qed_dev *cdev);

/**
 * @brief qed_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param cdev
 *
 * @return int
 */
int qed_hw_stop_fastpath(struct qed_dev *cdev);

/**
 * @brief qed_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_hw_prepare - prepare the device for initialization
 *
 * @param cdev
 * @param personality - personality to initialize
 *
 * @return int
 */
int qed_hw_prepare(struct qed_dev *cdev,
		   int personality);

/**
 * @brief qed_hw_remove - free the resources acquired by qed_hw_prepare
 *
 * @param cdev
 */
void qed_hw_remove(struct qed_dev *cdev);

/**
 * @brief qed_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct qed_ptt
 */
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_ptt_release(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt);

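/* A minimal sketch (illustrative only) of the acquire/release pattern
 * described above; the register access in the middle is a placeholder and
 * the error code is arbitrary:
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return -EAGAIN;
 *	... GRC/register access through p_ptt ...
 *	qed_ptt_release(p_hwfn, p_ptt);
 */
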
void qed_reset_vport_stats(struct qed_dev *cdev);

enum qed_dmae_address_type_t {
	QED_DMAE_ADDRESS_HOST_VIRT,
	QED_DMAE_ADDRESS_HOST_PHYS,
	QED_DMAE_ADDRESS_GRC
};

/* DMAE flags values. If the QED_DMAE_FLAG_RW_REPL_SRC flag is set, the
 * source is a block of length DMAE_MAX_RW_SIZE and the destination is
 * larger, then the source block will be duplicated as many times as
 * required to fill the destination block. This is used mostly to write a
 * zeroed buffer to a destination address using DMA.
 */
#define QED_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define QED_DMAE_FLAG_VF_SRC		0x00000002
#define QED_DMAE_FLAG_VF_DST		0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST	0x00000008

struct qed_dmae_params {
	u32 flags; /* consists of QED_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
};

/**
 * @brief qed_dmae_host2grc - copy data from a source address to
 *        a GRC address (dmae_data_offset) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param flags (one of the flags defined above)
 */
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt,
		  u64 source_addr,
		  u32 grc_addr,
		  u32 size_in_dwords,
		  u32 flags);

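/* Illustrative sketch (not from the driver) of the QED_DMAE_FLAG_RW_REPL_SRC
 * usage described above: filling a larger GRC region by replicating a small
 * zeroed host buffer. 'zero_buf_phys', 'grc_dest_addr' and
 * 'dest_size_in_dwords' are placeholders.
 *
 *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, zero_buf_phys, grc_dest_addr,
 *			       dest_size_in_dwords,
 *			       QED_DMAE_FLAG_RW_REPL_SRC);
 */
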
/**
 * @brief qed_dmae_grc2host - Read data from dmae data offset
 *        to the destination address using the given ptt
 *
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		      u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
		      u32 flags);

/**
 * @brief qed_dmae_host2host - copy data from a source address
 *        to a destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param params
 */
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       dma_addr_t source_addr,
		       dma_addr_t dest_addr,
		       u32 size_in_dwords, struct qed_dmae_params *p_params);

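/* Illustrative sketch (placeholder addresses and VF index) of a copy into a
 * VF-owned buffer using the qed_dmae_params flags defined above:
 *
 *	struct qed_dmae_params params = {
 *		.flags = QED_DMAE_FLAG_VF_DST,
 *		.dst_vfid = vf_id,
 *	};
 *
 *	rc = qed_dmae_host2host(p_hwfn, p_ptt, src_phys, dst_phys,
 *				size_in_dwords, &params);
 */
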
/**
 * @brief qed_chain_alloc - Allocate and initialize a chain
 *
 * @param cdev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 *
 * @return int
 */
int
qed_chain_alloc(struct qed_dev *cdev,
		enum qed_chain_use_mode intended_use,
		enum qed_chain_mode mode,
		enum qed_chain_cnt_type cnt_type,
		u32 num_elems, size_t elem_size, struct qed_chain *p_chain);

/**
 * @brief qed_chain_free - Free chain DMA memory
 *
 * @param cdev
 * @param p_chain
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);

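/* Illustrative allocation sketch. The enum values are the ones declared in
 * <linux/qed/qed_chain.h>; the element union and the element count are
 * placeholders.
 *
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev,
 *			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL,
 *			     QED_CHAIN_CNT_TYPE_U16,
 *			     256, sizeof(union example_chain_elem), &chain);
 *	if (rc)
 *		return rc;
 *	... use the chain ...
 *	qed_chain_free(cdev, &chain);
 */
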
/**
 * @brief qed_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
		    u16 src_id,
		    u16 *dst_id);

/**
 * @brief qed_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_vport(struct qed_hwfn *p_hwfn,
		 u8 src_id,
		 u8 *dst_id);

/**
 * @brief qed_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
		   u8 src_id,
		   u8 *dst_id);

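/* Illustrative sketch of the relative-to-absolute translation these helpers
 * provide; 'rel_vport_id' is a placeholder:
 *
 *	u8 abs_vport_id;
 *	int rc = qed_fw_vport(p_hwfn, rel_vport_id, &abs_vport_id);
 *
 *	if (rc)
 *		return rc;
 */
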
/**
 * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to add
 */
int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 *p_filter);

/**
 * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to remove
 */
void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u8 *p_filter);

enum qed_llh_port_filter_type_t {
	QED_LLH_FILTER_ETHERTYPE,
	QED_LLH_FILTER_TCP_SRC_PORT,
	QED_LLH_FILTER_TCP_DEST_PORT,
	QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
	QED_LLH_FILTER_UDP_SRC_PORT,
	QED_LLH_FILTER_UDP_DEST_PORT,
	QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};

/**
 * @brief qed_llh_add_protocol_filter - configures a protocol filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to add
 * @param dest_port - destination port to add
 * @param type - type of filter and comparison
 */
int
qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u16 source_port_or_eth_type,
			    u16 dest_port,
			    enum qed_llh_port_filter_type_t type);

/**
 * @brief qed_llh_remove_protocol_filter - removes a protocol filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_port_or_eth_type - source port or ethertype to remove
 * @param dest_port - destination port to remove
 * @param type - type of filter and comparison
 */
void
qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 source_port_or_eth_type,
			       u16 dest_port,
			       enum qed_llh_port_filter_type_t type);

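/* Illustrative sketch (the port number is a placeholder) of steering a TCP
 * destination port to this function through an llh protocol filter, and of
 * the matching removal:
 *
 *	rc = qed_llh_add_protocol_filter(p_hwfn, p_ptt, 0, 3260,
 *					 QED_LLH_FILTER_TCP_DEST_PORT);
 *	...
 *	qed_llh_remove_protocol_filter(p_hwfn, p_ptt, 0, 3260,
 *				       QED_LLH_FILTER_TCP_DEST_PORT);
 */
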
/**
 * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return int
 */
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf);

/**
 * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
 *        Coalescing can be configured up to 511 microseconds, but the accuracy
 *        decreases as the value grows [the bigger the value the less accurate],
 *        up to an error of 3 usec for the highest values.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param coalesce - Coalesce value in micro seconds.
 * @param qid - Queue index.
 * @param sb_id - SB Id
 *
 * @return int
 */
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id);

/**
 * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
 *        While the API allows setting coalescing per-qid, all Tx queues sharing
 *        a SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
 *        0x100-0x1ff], otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param coalesce - Coalesce value in micro seconds.
 * @param qid - Queue index.
 * @param sb_id - SB Id
 *
 * @return int
 */
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id);

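/* Illustrative sketch (the 64 usec value, qid and sb_id are placeholders) of
 * configuring Rx and Tx coalescing on a queue/SB pair, keeping the shared-SB
 * range note above in mind:
 *
 *	rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, 64, qid, sb_id);
 *	if (!rc)
 *		rc = qed_set_txq_coalesce(p_hwfn, p_ptt, 64, qid, sb_id);
 */
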

const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif