/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
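
/*
 * Illustrative usage (not part of the original header): these macros
 * take a printf-style format, so a typical call site looks like
 *
 *	mlx5_ib_warn(dev, "umr failed (%u)\n", umr_context.status);
 *
 * where "dev" is a struct mlx5_ib_dev pointer; the device name,
 * function, line number and PID are prepended automatically.
 */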

#define field_avail(type, fld, sz) (offsetof(type, fld) +	\
				    sizeof(((type *)0)->fld) <= (sz))
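
/*
 * Illustrative usage: field_avail() checks whether a userspace command
 * buffer of "sz" bytes is long enough to actually contain a given
 * field, which keeps the ABI compatible with older libraries that pass
 * shorter structs, e.g.
 *
 *	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen))
 *		... the uidx field was really provided by userspace ...
 *
 * (see get_qp_user_index() below).
 */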
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
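
/*
 * Sketch (an assumption based on these constants; the actual decode
 * lives in the driver's mmap handler): the command is carried in the
 * upper bits of the page offset, so it is recovered roughly as
 *
 *	cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *	      MLX5_IB_MMAP_CMD_MASK;
 *
 * leaving the low MLX5_IB_MMAP_CMD_SHIFT bits for a per-command index.
 */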

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
	MLX5_IB_MMAP_ALLOC_WC			= 6,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

struct mlx5_ib_vma_private_data {
	struct list_head list;
	struct vm_area_struct *vma;
	/* protect vma_private_list add/del */
	struct mutex *vma_private_list_mutex;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;
	struct list_head vma_private_list;
	/* protect vma_private_list add/del */
	struct mutex vma_private_list_mutex;

	unsigned long upd_xlt_page;
	/* protect ODP/KSM */
	struct mutex upd_xlt_page_mutex;
	u64 lib_caps;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_handle *rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table *lag_demux_ft;
	/* Protect the flow steering bypass flow tables
	 * when adding/deleting flow rules: only a single
	 * add/removal of a flow steering rule can be in
	 * progress at a time.
	 */
	struct mutex lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS      IB_SEND_RESERVED_END
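
/*
 * Illustrative only: because these UMR flags sit in the range that
 * enum ib_send_flags reserves for drivers, the UMR posting code can OR
 * them into an ordinary work request, e.g.
 *
 *	umrwr->wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_XLT;
 *
 * without clashing with the core IB_SEND_* values.
 */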

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
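
/*
 * Illustrative only: inside the driver, such a QP would be requested
 * with something like
 *
 *	init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 *
 * before calling mlx5_ib_create_qp(); per the comment above, this flag
 * is reserved for internal driver use.
 */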

struct wr_list {
	u16 opcode;
	u16 next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING		= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
	unsigned *wqe_head;
	u16 unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
	u16 cur_post;
	u16 last_poll;
	void *qend;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP	= 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ	= 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
	struct mlx5_core_qp core_qp;
	u32 rq_num_pas;
	u32 log_rq_stride;
	u32 log_rq_size;
	u32 rq_page_offset;
	u32 log_page_size;
	u32 log_num_strides;
	u32 two_byte_shift_en;
	u32 single_stride_log_num_of_bytes;
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
	int create_type;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
	u32 wqe_shift;
	int wq_sig;
	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
};

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
	int buf_size;
	u64 buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
	u16 xrcdn;
	u8 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
};

struct mlx5_ib_rss_qp {
	u32 tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tirn;
	u8 state;
	u32 flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tisn;
	u8 state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int buf_size;
	unsigned long offset;
	struct mlx5_sq_bfreg *bfreg;
};

struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_buf buf;

	struct mlx5_db db;
	struct mlx5_ib_wq rq;

	u8 sq_signal_bits;
	u8 next_fence;
	struct mlx5_ib_wq sq;

	/* serialize qp state modifications
	 */
	struct mutex mutex;
	u32 flags;
	u8 port;
	u8 state;
	int wq_sig;
	int scat_cqe;
	int max_inline_data;
	struct mlx5_bf bf;
	int has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int bfregn;

	int create_type;

	/* Store signature errors */
	bool signature_en;

	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	u32 rate_limit;
	u32 underlay_qpn;
	bool tunnel_offload_en;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf buf;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
};

struct mlx5_umr_wr {
	struct ib_send_wr wr;
	u64 virt_addr;
	u64 offset;
	struct ib_pd *pd;
	unsigned int page_shift;
	unsigned int xlt_size;
	u64 length;
	int access_flags;
	u32 mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
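
/*
 * Illustrative only: umr_wr() is the usual container_of() downcast from
 * the embedded ib_send_wr back to the driver-private UMR request, e.g.
 * in the send path:
 *
 *	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
 *
 * It is only valid for work requests posted with opcode MLX5_IB_WR_UMR.
 */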

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem *umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq ibcq;
	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	struct mlx5_db db;

	/* serialize access to the CQ
	 */
	spinlock_t lock;

	/* protect resize cq
	 */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;
	int cqe_size;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	u32 create_flags;
	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
	u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_buf buf;
	struct mlx5_db db;
	u64 *wrid;
	/* protect SRQ handling
	 */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming a SRQ
	 */
	struct mutex mutex;
	int wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	void *descs;
	dma_addr_t desc_map;
	int ndescs;
	int max_descs;
	int desc_size;
	int access_mode;
	struct mlx5_core_mkey mmkey;
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;
	int order;
	bool allocated_from_cache;
	int npages;
	struct mlx5_ib_dev *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;
	int live;
	void *descs_alloc;
	int access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr *parent;
	atomic_t num_leaf_free;
	wait_queue_head_t q_leaf_free;
};

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_core_mkey mmkey;
	int ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

struct umr_common {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	/* control access to UMR QP
	 */
	struct semaphore sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
	 */
	spinlock_t lock;

	struct dentry *dir;
	char name[4];
	u32 order;
	u32 xlt;
	u32 access_mode;
	u32 page;

	u32 size;
	u32 cur;
	u32 miss;
	u32 limit;

	struct dentry *fsize;
	struct dentry *fcur;
	struct dentry *fmiss;
	struct dentry *flimit;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
	int pending;
	struct completion compl;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
	int stopped;
	struct dentry *root;
	unsigned long last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
	struct ib_xrcd *x0;
	struct ib_xrcd *x1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u16 set_id;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t netdev_lock;
	struct net_device *netdev;
	struct notifier_block nb;
	atomic_t next_port;
	enum ib_port_state last_port_state;
};

struct mlx5_ib_dbg_param {
	int offset;
	struct mlx5_ib_dev *dev;
	struct dentry *dentry;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry *root;
	struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_dbg_delay_drop {
	struct dentry *dir_debugfs;
	struct dentry *rqs_cnt_debugfs;
	struct dentry *events_cnt_debugfs;
	struct dentry *timeout_debugfs;
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev *dev;
	struct work_struct delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex lock;
	u32 timeout;
	bool activate;
	atomic_t events_cnt;
	atomic_t rqs_cnt;
	struct mlx5_ib_dbg_delay_drop *dbg;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_UMR_RESOURCES,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
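
/*
 * Illustrative sketch (the callback names here are placeholders): a
 * profile is an array of init/cleanup pairs indexed by stage, e.g.
 *
 *	static const struct mlx5_ib_profile pf_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     stage_init_init,
 *			     stage_init_cleanup),
 *		...
 *	};
 *
 * so that device setup can walk the ->init hooks in enum order and
 * teardown can walk the ->cleanup hooks in reverse.
 */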

struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct mlx5_roce roce;
	int num_ports;
	/* serialize update of capability mask
	 */
	struct mutex cap_mask_mutex;
	bool ib_active;
	struct umr_common umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources devr;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
	int fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps odp_caps;
	u64 odp_max_size;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct mr_srcu;
	u32 null_mkey;
#endif
	struct mlx5_ib_flow_db flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_ib_delay_drop delay_drop;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	const struct mlx5_ib_profile *profile;

	/* protect the user_td */
	struct mutex lb_mutex;
	u32 user_td;
	u8 umr_fence;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}
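
/*
 * All of the to_m*() helpers above are plain container_of() downcasts
 * from the embedded core object to the driver structure wrapping it,
 * e.g. (illustrative):
 *
 *	struct mlx5_ib_cq *cq = to_mcq(ibcq);
 */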

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
			   int index, enum ib_gid_type *gid_type);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
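
/*
 * Illustrative usage: callers stamp the common header with
 * init_query_mad() and then set the attribute they are querying, e.g.
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 *
 * before passing the MAD to mlx5_MAD_IFC().
 */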

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
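
/*
 * Note (simple arithmetic, added for clarity): MLX5_MAX_UMR_PAGES is
 * 1 << 16 = 65536 page entries, so with 4KB pages a single UMR
 * operation can cover up to 256MB.
 */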

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns a non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}

#endif /* MLX5_IB_H */