/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#define mlx5_ib_dbg(dev, format, arg...)				\
	pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
	pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	       __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
	pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
		__LINE__, current->pid, ##arg)
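/*
 * Example (illustrative, not from the driver): the wrappers above are
 * called like ordinary printk helpers from code that holds the device
 * pointer, e.g.:
 *
 *	mlx5_ib_dbg(dev, "created QP 0x%x\n", qpn);
 *	mlx5_ib_warn(dev, "command failed, err %d\n", err);
 */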
#define field_avail(type, fld, sz) (offsetof(type, fld) +	\
				    sizeof(((type *)0)->fld) <= (sz))
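/*
 * Sketch (hypothetical helper, not part of the driver): field_avail() is
 * used to check that a user-space command buffer of 'inlen' bytes is long
 * enough to carry a given trailing field before reading it, which keeps
 * newer kernels compatible with older user libraries that pass shorter
 * structures.
 */
static inline bool mlx5_ib_cmd_has_uidx(size_t inlen)
{
	return field_avail(struct mlx5_ib_create_qp, uidx, inlen);
}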
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
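/*
 * Sketch (assumption, mirroring the decoding the driver's mmap handler
 * performs): the command is carried in the upper bits of the page offset
 * passed to mmap(), leaving the low bits for a command-specific index.
 */
static inline int mlx5_ib_mmap_get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}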
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};
enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH,
};
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};
enum {
	MLX5_CROSS_CHANNEL_UUAR	= 0,
};
struct mlx5_ib_vma_private_data {
	struct list_head	list;
	struct vm_area_struct	*vma;
};
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
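/*
 * Illustrative sketch (hypothetical helper, not in the driver): the to_*()
 * wrappers in this file all follow the same container_of() pattern,
 * recovering the driver object that embeds a core uverbs object when
 * entering a verbs callback:
 */
static inline struct mlx5_uuar_info *to_uuari_example(struct ib_ucontext *ibucontext)
{
	return &to_mucontext(ibucontext)->uuari;
}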
#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};
struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
};
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect the flow steering bypass flow tables when adding or
	 * deleting flow rules; only a single add/removal of a flow
	 * steering rule may be in progress at a time.
	 */
	struct mutex			lock;
};
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver.
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS		IB_SEND_RESERVED_END
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
struct mlx5_ib_wq {
	struct wr_list	       *w_list;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
};

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	struct ib_umem	       *umem;
	unsigned int		page_shift;
};
struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table	ib_rwq_ind_tbl;
};
/*
 * Connect-IB can trigger up to four concurrent pagefaults.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};
static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
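/*
 * Usage note (illustrative): the returned value is used as an index into
 * the per-QP pagefaults[] array declared further down, e.g.
 *
 *	&qp->pagefaults[mlx5_ib_get_pagefault_context(pagefault)]
 */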
struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};
struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
};
struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};
struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
};
struct mlx5_ib_rss_qp {
	u32			tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};
struct mlx5_ib_sq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};
struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq	sq;
	struct mlx5_ib_rq	rq;
};
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_ib_wq	rq;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
};
struct mlx5_ib_cq_buf {
	struct ib_umem		*umem;
};
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
};
struct mlx5_umr_wr {
	struct ib_send_wr	wr;
	unsigned int		page_shift;
};
static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
struct mlx5_shared_mr_info {
	struct ib_umem		*umem;
};
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	struct list_head	wc_list;
	enum ib_cq_notify_flags	notify_flags;
	struct work_struct	notify_work;
};
struct mlx5_ib_wc {
	struct ib_wc		wc;
	struct list_head	list;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;

	/* protect SRQ handling
	 */
	spinlock_t		lock;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
};
struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};
enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	struct mlx5_ib_dev     *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			access_flags; /* Needed for rereg MR */
};
struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};
struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};
struct umr_common {
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *fsize;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
};
struct mlx5_mr_cache {
	struct workqueue_struct	*wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	unsigned long		last_add;
};
struct mlx5_ib_gsi_qp;
struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp	*gsi;
	struct work_struct	pkey_change_work;
};
struct mlx5_ib_resources {
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex		mutex;
};
struct mlx5_ib_port {
	u16			q_cnt_id;
};
struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer.
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
};
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);

	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}
struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)		{}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)	{}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void)				{}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)  {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
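/*
 * Sketch (hypothetical helper, not part of the driver): how a caller
 * typically builds on init_query_mad() to prepare a PortInfo query before
 * handing the MAD to mlx5_MAD_IFC(). IB_SMP_ATTR_PORT_INFO comes from
 * <rdma/ib_smi.h>, which is included above.
 */
static inline void init_port_info_query_example(struct ib_smp *mad, u8 port)
{
	init_query_mad(mad);
	mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	mad->attr_mod = cpu_to_be32(port);
}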
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
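/*
 * Note (illustrative): local read permission is always granted, so e.g.
 * convert_access(IB_ACCESS_REMOTE_READ) yields
 * MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ.
 */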
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
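/*
 * Illustrative only (hypothetical wrapper, not in the driver): how the
 * check above is typically consumed by a create-CQ path.
 */
static inline int mlx5_ib_validate_cq_flags_example(u32 flags)
{
	return check_cq_create_flags(flags) ? -EOPNOTSUPP : 0;
}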
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
#endif /* MLX5_IB_H */