/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

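/*
 * field_avail() checks whether the field 'fld' of struct 'type' fits
 * entirely within a buffer of 'sz' bytes.  It is intended for validating
 * variable-length user commands, e.g. a udata blob coming from an older
 * userspace that only knows a shorter version of the struct.
 */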
#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

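/*
 * mmap requests are multiplexed on the page offset: the command lives in
 * the bits at and above MLX5_IB_MMAP_CMD_SHIFT, leaving the low bits for
 * command-specific data, so the command can be recovered with
 * (vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK.
 */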
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR	= 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_vma_private_data {
	struct list_head	list;
	struct vm_area_struct	*vma;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	unsigned int			prio;
	struct mlx5_flow_rule		*rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	/* Protect flow steering bypass flow tables
	 * when adding/removing flow rules.
	 * Only a single add/removal of a flow steering rule can be
	 * done simultaneously.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS		IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	u32			rqn;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	int			create_type;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

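/*
 * Pick the pagefault context for a reported fault.  This works because
 * the MLX5_PFAULT_REQUESTOR and MLX5_PFAULT_WRITE flag bits line up with
 * the ordering of enum mlx5_ib_pagefault_context above (responder-read
 * maps to 0, requestor-write to 3).
 */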
static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			fm_cache;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user-space QPs; for kernel QPs
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and that shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_ib_port {
	u16 q_cnt_id;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

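/*
 * Translation from mlx5_core_qp goes through mlx5_ib_qp_base rather than
 * mlx5_ib_qp itself: a raw packet QP owns separate SQ and RQ firmware
 * objects, so each base keeps a back-pointer to the containing mlx5_ib_qp.
 */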
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

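/*
 * Convert ib_access_flags into the MLX5_PERM_* bits programmed into an
 * mkey.  Local read permission is always set, since IB memory regions
 * are always locally readable.
 */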
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ
	 * create flags, zero otherwise.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

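/*
 * With CQE version 1 userspace must supply its own user index when
 * creating a QP/SRQ: it has to fit in MLX5_USER_ASSIGNED_UIDX_MASK and
 * may not be the reserved MLX5_IB_DEFAULT_UIDX value.  With CQE version
 * 0 the supplied index is ignored and the default is used instead.
 */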
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
#endif /* MLX5_IB_H */