/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
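
/*
 * Illustrative usage (not part of the original header): given a
 * struct mlx5_ib_dev *dev, a call such as
 *
 *	mlx5_ib_warn(dev, "command failed, status %d\n", err);
 *
 * emits "<ibdev name>:<function>:<line>:(pid <pid>): command failed, ..."
 * so every message is traceable to a device, a code location and a task.
 */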
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1, /* always last */
};
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};
enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH,
};
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};
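
/*
 * Note (added for clarity): the to_*() helpers in this file recover
 * the driver-private wrapper from an embedded core or uverbs object
 * via container_of(), e.g.:
 *
 *	struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 *
 * This is pure pointer arithmetic and is valid only for objects that
 * were allocated as part of the corresponding mlx5_ib structure.
 */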
#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_IB_FLOW_MCAST_PRIO - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};
struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	unsigned int			prio;
	struct mlx5_flow_rule	*rule;
};
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * only single add/removal of flow steering rule could be done
	 * simultaneously.
	 */
	struct mutex			lock;
};
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1
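
/*
 * Note (added for clarity): these values sit in the reserved ranges of
 * the uverbs enums, so a kernel-internal UMR posting can look like a
 * normal send, e.g. (hypothetical fragment):
 *
 *	umrwr.wr.opcode = MLX5_IB_WR_UMR;
 *	umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UNREG;
 *
 * without ever colliding with opcodes or flags user space may set.
 */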
struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};
/*
 * Connect-IB can trigger up to four concurrent pagefaults.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};
static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
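
/*
 * Note (added for clarity, assuming MLX5_PFAULT_REQUESTOR and
 * MLX5_PFAULT_WRITE are single-bit flags): the masked value is then a
 * direct index into enum mlx5_ib_pagefault_context above, e.g. a fault
 * with both bits set maps to MLX5_IB_PAGEFAULT_REQUESTOR_WRITE.
 */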
struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};
struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_rq	rq;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_core_qp		mqp;
		struct mlx5_ib_raw_packet_qp	raw_packet_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u32			doorbell_qpn;
	u8			sq_signal_bits;
	u8			fm_cache;
	int			sq_max_wqes_per_wr;
	struct mlx5_ib_wq	sq;

	struct ib_umem	       *umem;
	int			buf_size;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			state;
	int			mlx_type;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QP's that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
};
struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem	       *umem;
	int			cqe_size;
	int			nent;
};
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
};
struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};
static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
struct mlx5_shared_mr_info {
	int			mr_id;
	struct ib_umem	       *umem;
};
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};
struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};
enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
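
/*
 * Illustrative sketch (an assumption, modeled on an ODP-style MTT
 * update path): when populating MTT entries the access bits are ORed
 * into the page address before byte-swapping, e.g.:
 *
 *	pas[i] = cpu_to_be64(page_addr | MLX5_IB_MTT_PRESENT);
 *
 * and clearing MLX5_IB_MTT_PRESENT invalidates an entry.
 */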
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void		       *descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void		       *descs_alloc;
};
struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};
static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->status = -1;
	init_completion(&context->done);
}
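
/*
 * Illustrative usage sketch (an assumption, mirroring the usual UMR
 * completion pattern): initialize the context, post a UMR work request
 * that refers to it, then block until the CQ handler completes it and
 * check the written-back status:
 *
 *	struct mlx5_ib_umr_context umr_context;
 *
 *	mlx5_ib_init_umr_context(&umr_context);
 *	... post the UMR wr with wr_id pointing at &umr_context ...
 *	wait_for_completion(&umr_context.done);
 *	if (umr_context.status != IB_WC_SUCCESS)
 *		err = -EFAULT;
 */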
struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry	       *root;
	unsigned long		last_add;
};
struct mlx5_ib_resources {
	struct ib_cq	       *c0;
	struct ib_xrcd	       *x0;
	struct ib_xrcd	       *x1;
	struct ib_pd	       *p0;
	struct ib_srq	       *s0;
	struct ib_srq	       *s1;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev	       *mdev;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}
struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
			   struct mlx5_ib_ah *ah);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)		{}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)	{}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void)				{}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)	{}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
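
/*
 * Example (added for clarity): convert_access() always grants local
 * read, so
 *
 *	convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ)
 *
 * yields MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ |
 * MLX5_PERM_LOCAL_READ.
 */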
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
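
/*
 * Note (added for clarity): MLX5_MAX_UMR_PAGES is 1 << 16 = 65536
 * pages, i.e. 256MB of registered memory per UMR operation with 4KB
 * pages.
 */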
#endif /* MLX5_IB_H */