/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_DEV_API_H
#define _QED_DEV_API_H

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed_int.h"

/**
 * @brief qed_init_dp - initialize the debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
void qed_init_dp(struct qed_dev *cdev,
		 u32 dp_module,
		 u8 dp_level);

/**
 * @brief qed_init_struct - initialize the device structure to
 *        its defaults
 *
 * @param cdev
 */
void qed_init_struct(struct qed_dev *cdev);

/**
 * @brief qed_resc_free - free the device's resources
 *
 * @param cdev
 */
void qed_resc_free(struct qed_dev *cdev);

/**
 * @brief qed_resc_alloc - allocate the device's resources
 *
 * @param cdev
 *
 * @return int
 */
int qed_resc_alloc(struct qed_dev *cdev);

/**
 * @brief qed_resc_setup - setup the device's allocated resources
 *
 * @param cdev
 */
void qed_resc_setup(struct qed_dev *cdev);

/**
 * @brief qed_hw_init - initialize the device's hardware
 *
 * @param cdev
 * @param p_tunn
 * @param b_hw_start
 * @param int_mode - interrupt mode [msix, inta, etc.] to use.
 * @param allow_npar_tx_switch - npar tx switching to be used
 *        for vports configured for tx-switching.
 * @param bin_fw_data - binary fw data pointer in binary fw file.
 *        Pass NULL if not using binary fw file.
 *
 * @return int
 */
int qed_hw_init(struct qed_dev *cdev,
		struct qed_tunn_start_params *p_tunn,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
		const u8 *bin_fw_data);
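
/* Illustrative sketch only (not part of this API): a load flow might drive
 * qed_hw_init() roughly as below. The MSI-X mode value from qed_if.h, the
 * pre-filled tunn_info, and the fw_data pointer are assumptions made for
 * the example.
 *
 *	rc = qed_hw_init(cdev, &tunn_info, true, QED_INT_MODE_MSIX,
 *			 true, fw_data);
 *	if (rc)
 *		goto err;
 */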

/**
 * @brief qed_hw_timers_stop_all - stop the timers HW block
 *
 * @param cdev
 *
 * @return void
 */
void qed_hw_timers_stop_all(struct qed_dev *cdev);

/**
 * @brief qed_hw_stop - stop the device's hardware
 *
 * @param cdev
 *
 * @return int
 */
int qed_hw_stop(struct qed_dev *cdev);

/**
 * @brief qed_hw_stop_fastpath - should be called in case
 *        slowpath is still required for the device,
 *        but fastpath is not.
 *
 * @param cdev
 *
 */
void qed_hw_stop_fastpath(struct qed_dev *cdev);

/**
 * @brief qed_hw_start_fastpath - restart fastpath traffic,
 *        only if hw_stop_fastpath was called
 *
 * @param p_hwfn
 *
 */
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
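
/* Illustrative pairing of the two fastpath helpers; the surrounding flow
 * (what is reconfigured, and which p_hwfn is restarted) is an assumption
 * for the example.
 *
 *	qed_hw_stop_fastpath(cdev);
 *	... reconfigure queues while slowpath stays up ...
 *	qed_hw_start_fastpath(p_hwfn);
 */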

/**
 * @brief qed_hw_reset - reset the device's hardware
 *
 * @param cdev
 *
 * @return int
 */
int qed_hw_reset(struct qed_dev *cdev);

/**
 * @brief qed_hw_prepare - prepare the device for initialization
 *
 * @param cdev
 * @param personality - personality to initialize
 *
 * @return int
 */
int qed_hw_prepare(struct qed_dev *cdev,
		   int personality);

/**
 * @brief qed_hw_remove - remove the device
 *
 * @param cdev
 */
void qed_hw_remove(struct qed_dev *cdev);

/**
 * @brief qed_ptt_acquire - Allocate a PTT window
 *
 * Should be called at the entry point to the driver (at the beginning of an
 * exported function)
 *
 * @param p_hwfn
 *
 * @return struct qed_ptt
 */
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_ptt_release - Release PTT Window
 *
 * Should be called at the end of a flow - at the end of the function that
 * acquired the PTT.
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_ptt_release(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt);
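
/* Minimal sketch of the acquire/use/release pattern described above; the
 * register access in the middle is a placeholder, and the error value
 * returned on acquire failure is an assumption.
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return -EBUSY;
 *	... access registers through p_ptt ...
 *	qed_ptt_release(p_hwfn, p_ptt);
 */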
void qed_reset_vport_stats(struct qed_dev *cdev);

enum qed_dmae_address_type_t {
	QED_DMAE_ADDRESS_HOST_VIRT,
	QED_DMAE_ADDRESS_HOST_PHYS,
	QED_DMAE_ADDRESS_GRC
};

/* Values for the flags field. If the QED_DMAE_FLAG_RW_REPL_SRC flag is set,
 * the source is a block of length DMAE_MAX_RW_SIZE, and the destination is
 * larger, then the source block is duplicated as many times as required to
 * fill the destination block. This is used mostly to write a zeroed buffer
 * to a destination address using DMA.
 */
#define QED_DMAE_FLAG_RW_REPL_SRC	0x00000001
#define QED_DMAE_FLAG_VF_SRC		0x00000002
#define QED_DMAE_FLAG_VF_DST		0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST	0x00000008

struct qed_dmae_params {
	u32 flags; /* consists of QED_DMAE_FLAG_* values */
	u8 src_vfid;
	u8 dst_vfid;
};

/**
 * @brief qed_dmae_host2grc - copy data from source addr to
 *        dmae registers using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param grc_addr (dmae_data_offset)
 * @param size_in_dwords
 * @param flags (one of the flags defined above)
 */
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt,
		  u64 source_addr,
		  u32 grc_addr,
		  u32 size_in_dwords,
		  u32 flags);
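
/* Illustrative sketch of the zero-fill use mentioned above: a small zeroed
 * DMA buffer is replicated across a larger GRC region. The zero_buf_phys,
 * grc_addr and size_in_dwords values are assumptions made for the example.
 *
 *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)zero_buf_phys,
 *			       grc_addr, size_in_dwords,
 *			       QED_DMAE_FLAG_RW_REPL_SRC);
 */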

/**
 * @brief qed_dmae_grc2host - Read data from dmae data offset
 *        into the given destination address using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param grc_addr (dmae_data_offset)
 * @param dest_addr
 * @param size_in_dwords
 * @param flags - one of the flags defined above
 */
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		      u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
		      u32 flags);

/**
 * @brief qed_dmae_host2host - copy data from a source address
 *        to a destination address (for SRIOV) using the given ptt
 *
 * @param p_hwfn
 * @param p_ptt
 * @param source_addr
 * @param dest_addr
 * @param size_in_dwords
 * @param params
 */
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       dma_addr_t source_addr,
		       dma_addr_t dest_addr,
		       u32 size_in_dwords, struct qed_dmae_params *p_params);
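
/* Minimal sketch of a PF-to-VF copy using the params structure; the flag
 * choice and the vfid/address variables are assumptions for illustration.
 *
 *	struct qed_dmae_params params = { 0 };
 *
 *	params.flags = QED_DMAE_FLAG_VF_DST;
 *	params.dst_vfid = vfid;
 *	rc = qed_dmae_host2host(p_hwfn, p_ptt, src_phys, dst_phys,
 *				size_in_dwords, &params);
 */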

/**
 * @brief qed_chain_alloc - Allocate and initialize a chain
 *
 * @param cdev
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param num_elems
 * @param elem_size
 * @param p_chain
 *
 * @return int
 */
int
qed_chain_alloc(struct qed_dev *cdev,
		enum qed_chain_use_mode intended_use,
		enum qed_chain_mode mode,
		enum qed_chain_cnt_type cnt_type,
		u32 num_elems, size_t elem_size, struct qed_chain *p_chain);

/**
 * @brief qed_chain_free - Free chain DMA memory
 *
 * @param cdev
 * @param p_chain
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
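
/* Illustrative allocation/free pairing; the PBL mode, producer use-mode and
 * 16-bit counter type are assumed values from qed_chain.h, and the element
 * type is a placeholder.
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
 *			     num_elems, sizeof(struct my_elem), &chain);
 *	if (rc)
 *		return rc;
 *	...
 *	qed_chain_free(cdev, &chain);
 */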

/**
 * @brief qed_fw_l2_queue - Get absolute L2 queue ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
		    u16 src_id,
		    u16 *dst_id);

/**
 * @brief qed_fw_vport - Get absolute vport ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_vport(struct qed_hwfn *p_hwfn,
		 u8 src_id,
		 u8 *dst_id);

/**
 * @brief qed_fw_rss_eng - Get absolute RSS engine ID
 *
 * @param p_hwfn
 * @param src_id - relative to p_hwfn
 * @param dst_id - absolute per engine
 *
 * @return int
 */
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
		   u8 src_id,
		   u8 *dst_id);
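
/* Minimal sketch of translating a function-relative vport ID into its
 * engine-absolute value before handing it to firmware; the variable names
 * are placeholders.
 *
 *	u8 abs_vport_id;
 *
 *	rc = qed_fw_vport(p_hwfn, rel_vport_id, &abs_vport_id);
 *	if (rc)
 *		return rc;
 */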

/**
 * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to add
 */
int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 *p_filter);

/**
 * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_filter - MAC to remove
 */
void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u8 *p_filter);
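
/* Illustrative only: adding and later removing an LLH MAC filter under an
 * acquired PTT; the mac[] buffer is an assumed ETH_ALEN-sized array.
 *
 *	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, mac);
 *	if (rc)
 *		return rc;
 *	...
 *	qed_llh_remove_mac_filter(p_hwfn, p_ptt, mac);
 */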

/**
 * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
 *
 * @param p_hwfn
 * @param p_ptt
 * @param id - For PF, engine-relative. For VF, PF-relative.
 * @param is_vf - true iff cleanup is made for a VF.
 *
 * @return int
 */
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf);
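
/* Minimal sketch of a PF-side cleanup call; the rel_pf_id field is assumed
 * to be the engine-relative PF index kept in the hw-function structure.
 *
 *	rc = qed_final_cleanup(p_hwfn, p_ptt, p_hwfn->rel_pf_id, false);
 *	if (rc)
 *		return rc;
 */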

/**
 * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
 *        Coalescing can be configured up to 511 microseconds, but accuracy
 *        decreases as the value grows [the bigger the value the less
 *        accurate], up to an error of 3 usec for the highest values.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param coalesce - Coalesce value in micro seconds.
 * @param qid - Queue index.
 * @param sb_id - SB Id
 *
 * @return int
 */
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id);

/**
 * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
 *        While the API allows setting coalescing per-qid, all Tx queues
 *        sharing an SB should be in the same range [i.e., either 0-0x7f,
 *        0x80-0xff or 0x100-0x1ff], otherwise the configuration would break.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param coalesce - Coalesce value in micro seconds.
 * @param qid - Queue index.
 * @param sb_id - SB Id
 *
 * @return int
 */
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id);
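
/* Illustrative only: pairing Rx and Tx coalescing updates for one queue;
 * the 64/128 usec values and the qid/sb_id variables are assumptions.
 *
 *	rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, 64, qid, sb_id);
 *	if (!rc)
 *		rc = qed_set_txq_coalesce(p_hwfn, p_ptt, 128, qid, sb_id);
 */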
#endif