/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #ifndef _HNS_ROCE_DEVICE_H
34 #define _HNS_ROCE_DEVICE_H
36 #include <rdma/ib_verbs.h>
38 #define DRV_NAME "hns_roce"
40 #define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
42 #define MAC_ADDR_OCTET_NUM 6
43 #define HNS_ROCE_MAX_MSG_LEN 0x80000000
45 #define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
47 #define HNS_ROCE_IB_MIN_SQ_STRIDE 6
49 #define HNS_ROCE_BA_SIZE (32 * 4096)
51 /* Hardware specification only for v1 engine */
52 #define HNS_ROCE_MIN_CQE_NUM 0x40
53 #define HNS_ROCE_MIN_WQE_NUM 0x20
55 /* Hardware specification only for v1 engine */
56 #define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
57 #define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
59 #define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
60 #define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
61 (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
62 #define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
63 #define HNS_ROCE_MIN_CQE_CNT 16
65 #define HNS_ROCE_MAX_IRQ_NUM 128
70 #define HNS_ROCE_CEQ 0
71 #define HNS_ROCE_AEQ 1
73 #define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
74 #define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
77 #define HNS_ROCE_SL_SHIFT 28
78 #define HNS_ROCE_TCLASS_SHIFT 20
79 #define HNS_ROCE_FLOW_LABEL_MASK 0xfffff
81 #define HNS_ROCE_MAX_PORTS 6
82 #define HNS_ROCE_MAX_GID_NUM 16
83 #define HNS_ROCE_GID_SIZE 16
85 #define HNS_ROCE_HOP_NUM_0 0xff
87 #define BITMAP_NO_RR 0
90 #define MR_TYPE_MR 0x00
91 #define MR_TYPE_FRMR 0x01
92 #define MR_TYPE_DMA 0x03
94 #define HNS_ROCE_FRMR_MAX_PA 512
96 #define PKEY_ID 0xffff
98 #define NODE_DESC_SIZE 64
99 #define DB_REG_OFFSET 0x1000
101 #define SERV_TYPE_RC 0
102 #define SERV_TYPE_RD 1
103 #define SERV_TYPE_UC 2
104 #define SERV_TYPE_UD 3
106 /* Configure to HW for PAGE_SIZE larger than 4KB */
107 #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
109 #define PAGES_SHIFT_8 8
110 #define PAGES_SHIFT_16 16
111 #define PAGES_SHIFT_24 24
112 #define PAGES_SHIFT_32 32
114 #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
115 #define SRQ_DB_REG 0x230
/* QP doorbell-record support flags (reported to userspace). */
enum {
	HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
	HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
};

/* CQ doorbell-record support flag (reported to userspace). */
enum {
	HNS_ROCE_SUPPORT_CQ_RECORD_DB = 1 << 0,
};
/* QP states as encoded in the hardware QP context (not ib_qp_state). */
enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,	/* number of states; keep last */
};
/* Asynchronous event types delivered through the AEQ. */
enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG			= 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED		= 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST			= 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED			= 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR		= 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR	= 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR	= 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH		= 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH		= 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR		= 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR		= 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW			= 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID		= 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE			= 0x0f,
	/* 0x10 and 0x11 are unused in the current application case */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW			= 0x12,
	HNS_ROCE_EVENT_TYPE_MB				= 0x13,
	HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW		= 0x14,
	HNS_ROCE_EVENT_TYPE_FLR				= 0x15,
};
/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
enum {
	HNS_ROCE_LWQCE_QPC_ERROR		= 1,
	HNS_ROCE_LWQCE_MTU_ERROR		= 2,
	HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR	= 3,
	HNS_ROCE_LWQCE_WQE_ADDR_ERROR		= 4,
	HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR	= 5,
	HNS_ROCE_LWQCE_SL_ERROR			= 6,
	HNS_ROCE_LWQCE_PORT_ERROR		= 7,
};
/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
enum {
	HNS_ROCE_LAVWQE_R_KEY_VIOLATION		= 1,
	HNS_ROCE_LAVWQE_LENGTH_ERROR		= 2,
	HNS_ROCE_LAVWQE_VA_ERROR		= 3,
	HNS_ROCE_LAVWQE_PD_ERROR		= 4,
	HNS_ROCE_LAVWQE_RW_ACC_ERROR		= 5,
	HNS_ROCE_LAVWQE_KEY_STATE_ERROR		= 6,
	HNS_ROCE_LAVWQE_MR_OPERATION_ERROR	= 7,
};
/* DOORBELL overflow subtype */
enum {
	HNS_ROCE_DB_SUBTYPE_SDB_OVF	= 1,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF	= 2,
	HNS_ROCE_DB_SUBTYPE_ODB_OVF	= 3,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF	= 4,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP	= 5,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP	= 6,
};
enum {
	/* RQ&SRQ related operations */
	HNS_ROCE_OPCODE_SEND_DATA_RECEIVE	= 0x06,
	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
};
197 HNS_ROCE_CAP_FLAG_REREG_MR
= BIT(0),
198 HNS_ROCE_CAP_FLAG_ROCE_V1_V2
= BIT(1),
199 HNS_ROCE_CAP_FLAG_RQ_INLINE
= BIT(2),
200 HNS_ROCE_CAP_FLAG_RECORD_DB
= BIT(3),
201 HNS_ROCE_CAP_FLAG_SQ_RECORD_DB
= BIT(4),
202 HNS_ROCE_CAP_FLAG_SRQ
= BIT(5),
203 HNS_ROCE_CAP_FLAG_MW
= BIT(7),
204 HNS_ROCE_CAP_FLAG_FRMR
= BIT(8),
205 HNS_ROCE_CAP_FLAG_ATOMIC
= BIT(10),
208 enum hns_roce_mtt_type
{
216 HNS_ROCE_DB_PER_PAGE
= PAGE_SIZE
/ 4
219 #define HNS_ROCE_CMD_SUCCESS 1
221 #define HNS_ROCE_PORT_DOWN 0
222 #define HNS_ROCE_PORT_UP 1
224 #define HNS_ROCE_MTT_ENTRY_PER_SEG 8
226 #define PAGE_ADDR_SHIFT 12
228 struct hns_roce_uar
{
231 unsigned long logic_idx
;
234 struct hns_roce_ucontext
{
235 struct ib_ucontext ibucontext
;
236 struct hns_roce_uar uar
;
237 struct list_head page_list
;
238 struct mutex page_mutex
;
246 struct hns_roce_bitmap
{
247 /* Bitmap Traversal last a bit which is 1 */
251 unsigned long reserved_top
;
254 unsigned long *table
;
257 /* Order bitmap length -- bit num compute formula: 1 << (max_order - order) */
258 /* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
259 /* Every bit repesent to a partner free/used status in bitmap */
261 * Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
262 * Bit = 1 represent to idle and available; bit = 0: not available
264 struct hns_roce_buddy
{
265 /* Members point to every order level bitmap */
266 unsigned long **bits
;
267 /* Represent to avail bits of the order level bitmap */
273 /* For Hardware Entry Memory */
274 struct hns_roce_hem_table
{
275 /* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
277 /* HEM array elment num */
278 unsigned long num_hem
;
279 /* HEM entry record obj total num */
280 unsigned long num_obj
;
282 unsigned long obj_size
;
283 unsigned long table_chunk_size
;
286 struct hns_roce_hem
**hem
;
288 dma_addr_t
*bt_l1_dma_addr
;
290 dma_addr_t
*bt_l0_dma_addr
;
293 struct hns_roce_mtt
{
294 unsigned long first_seg
;
297 enum hns_roce_mtt_type mtt_type
;
304 int enabled
; /* MW's active status */
310 /* Only support 4K page size for mr register */
315 struct ib_umem
*umem
;
316 u64 iova
; /* MR's virtual orignal addr */
317 u64 size
; /* Address range of MR */
318 u32 key
; /* Key of MR */
319 u32 pd
; /* PD num of MR */
320 u32 access
;/* Access permission of MR */
322 int enabled
; /* MR's active status */
323 int type
; /* MR's register type */
324 u64
*pbl_buf
;/* MR's PBL space */
325 dma_addr_t pbl_dma_addr
; /* MR's PBL space PA */
326 u32 pbl_size
;/* PA number in the PBL */
327 u64 pbl_ba
;/* page table address */
328 u32 l0_chunk_last_num
;/* L0 last number */
329 u32 l1_chunk_last_num
;/* L1 last number */
330 u64
**pbl_bt_l2
;/* PBL BT L2 */
331 u64
**pbl_bt_l1
;/* PBL BT L1 */
332 u64
*pbl_bt_l0
;/* PBL BT L0 */
333 dma_addr_t
*pbl_l2_dma_addr
;/* PBL BT L2 dma addr */
334 dma_addr_t
*pbl_l1_dma_addr
;/* PBL BT L1 dma addr */
335 dma_addr_t pbl_l0_dma_addr
;/* PBL BT L0 dma addr */
336 u32 pbl_ba_pg_sz
;/* BT chunk page size */
337 u32 pbl_buf_pg_sz
;/* buf chunk page size */
338 u32 pbl_hop_num
;/* multi-hop number */
341 struct hns_roce_mr_table
{
342 struct hns_roce_bitmap mtpt_bitmap
;
343 struct hns_roce_buddy mtt_buddy
;
344 struct hns_roce_hem_table mtt_table
;
345 struct hns_roce_hem_table mtpt_table
;
346 struct hns_roce_buddy mtt_cqe_buddy
;
347 struct hns_roce_hem_table mtt_cqe_table
;
348 struct hns_roce_buddy mtt_srqwqe_buddy
;
349 struct hns_roce_hem_table mtt_srqwqe_table
;
350 struct hns_roce_buddy mtt_idx_buddy
;
351 struct hns_roce_hem_table mtt_idx_table
;
355 u64
*wrid
; /* Work request ID */
357 int wqe_cnt
; /* WQE num */
361 int wqe_shift
;/* WQE size */
364 void __iomem
*db_reg_l
;
367 struct hns_roce_sge
{
368 int sge_cnt
; /* SGE num */
370 int sge_shift
;/* SGE size */
373 struct hns_roce_buf_list
{
378 struct hns_roce_buf
{
379 struct hns_roce_buf_list direct
;
380 struct hns_roce_buf_list
*page_list
;
386 struct hns_roce_db_pgdir
{
387 struct list_head list
;
388 DECLARE_BITMAP(order0
, HNS_ROCE_DB_PER_PAGE
);
389 DECLARE_BITMAP(order1
, HNS_ROCE_DB_PER_PAGE
/ 2);
390 unsigned long *bits
[2];
395 struct hns_roce_user_db_page
{
396 struct list_head list
;
397 struct ib_umem
*umem
;
398 unsigned long user_virt
;
405 struct hns_roce_db_pgdir
*pgdir
;
406 struct hns_roce_user_db_page
*user_page
;
414 struct hns_roce_cq_buf
{
415 struct hns_roce_buf hr_buf
;
416 struct hns_roce_mtt hr_mtt
;
421 struct hns_roce_cq_buf hr_buf
;
422 struct hns_roce_db db
;
425 struct ib_umem
*umem
;
426 void (*comp
)(struct hns_roce_cq
*cq
);
427 void (*event
)(struct hns_roce_cq
*cq
, enum hns_roce_event event_type
);
429 struct hns_roce_uar
*uar
;
433 void __iomem
*cq_db_l
;
439 struct completion free
;
442 struct hns_roce_idx_que
{
443 struct hns_roce_buf idx_buf
;
446 struct ib_umem
*umem
;
447 struct hns_roce_mtt mtt
;
451 struct hns_roce_srq
{
453 void (*event
)(struct hns_roce_srq
*srq
, enum hns_roce_event event
);
458 void __iomem
*db_reg_l
;
461 struct completion free
;
463 struct hns_roce_buf buf
;
465 struct ib_umem
*umem
;
466 struct hns_roce_mtt mtt
;
467 struct hns_roce_idx_que idx_que
;
475 struct hns_roce_uar_table
{
476 struct hns_roce_bitmap bitmap
;
479 struct hns_roce_qp_table
{
480 struct hns_roce_bitmap bitmap
;
482 struct hns_roce_hem_table qp_table
;
483 struct hns_roce_hem_table irrl_table
;
484 struct hns_roce_hem_table trrl_table
;
487 struct hns_roce_cq_table
{
488 struct hns_roce_bitmap bitmap
;
490 struct radix_tree_root tree
;
491 struct hns_roce_hem_table table
;
494 struct hns_roce_srq_table
{
495 struct hns_roce_bitmap bitmap
;
497 struct hns_roce_hem_table table
;
500 struct hns_roce_raq_table
{
501 struct hns_roce_buf_list
*e_raq_buf
;
509 __le32 sl_tclass_flowlabel
;
510 u8 dgid
[HNS_ROCE_GID_SIZE
];
518 struct hns_roce_av av
;
521 struct hns_roce_cmd_context
{
522 struct completion done
;
529 struct hns_roce_cmdq
{
530 struct dma_pool
*pool
;
531 struct mutex hcr_mutex
;
532 struct semaphore poll_sem
;
534 * Event mode: cmd register mutex protection,
535 * ensure to not exceed max_cmds and user use limit region
537 struct semaphore event_sem
;
539 spinlock_t context_lock
;
541 struct hns_roce_cmd_context
*context
;
543 * Result of get integer part
544 * which max_comds compute according a power of 2
548 * Process whether use event mode, init default non-zero
549 * After the event queue of cmd event ready,
550 * can switch into event mode
551 * close device, switch into poll mode(non event mode)
557 struct hns_roce_cmd_mailbox
{
564 struct hns_roce_rinl_sge
{
569 struct hns_roce_rinl_wqe
{
570 struct hns_roce_rinl_sge
*sg_list
;
574 struct hns_roce_rinl_buf
{
575 struct hns_roce_rinl_wqe
*wqe_list
;
581 struct hns_roce_buf hr_buf
;
582 struct hns_roce_wq rq
;
583 struct hns_roce_db rdb
;
584 struct hns_roce_db sdb
;
588 __le32 sq_signal_bits
;
590 int sq_max_wqes_per_wr
;
592 struct hns_roce_wq sq
;
594 struct ib_umem
*umem
;
595 struct hns_roce_mtt mtt
;
607 void (*event
)(struct hns_roce_qp
*qp
,
608 enum hns_roce_event event_type
);
612 struct completion free
;
614 struct hns_roce_sge sge
;
617 struct hns_roce_rinl_buf rq_inl_buf
;
620 struct hns_roce_sqp
{
621 struct hns_roce_qp hr_qp
;
624 struct hns_roce_ib_iboe
{
626 struct net_device
*netdevs
[HNS_ROCE_MAX_PORTS
];
627 struct notifier_block nb
;
628 u8 phy_port
[HNS_ROCE_MAX_PORTS
];
632 HNS_ROCE_EQ_STAT_INVALID
= 0,
633 HNS_ROCE_EQ_STAT_VALID
= 2,
636 struct hns_roce_ceqe
{
640 struct hns_roce_aeqe
{
677 struct hns_roce_dev
*hr_dev
;
678 void __iomem
*doorbell
;
680 int type_flag
;/* Aeq:1 ceq:0 */
688 struct hns_roce_buf_list
*buf_list
;
696 u64
*bt_l0
; /* Base address table for L0 */
697 u64
**bt_l1
; /* Base address table for L1 */
702 u32 l0_last_num
; /* L0 last chunk num */
703 u32 l1_last_num
; /* L1 last chunk num */
707 dma_addr_t cur_eqe_ba
;
708 dma_addr_t nxt_eqe_ba
;
713 struct hns_roce_eq_table
{
714 struct hns_roce_eq
*eq
;
715 void __iomem
**eqc_base
; /* only for hw v1 */
718 struct hns_roce_caps
{
721 int gid_table_len
[HNS_ROCE_MAX_PORTS
];
722 int pkey_table_len
[HNS_ROCE_MAX_PORTS
];
723 int local_ca_ack_delay
;
726 u32 max_sq_sg
; /* 2 */
727 u32 max_sq_inline
; /* 32 */
728 u32 max_rq_sg
; /* 2 */
730 int num_qps
; /* 256k */
734 u32 max_wqes
; /* 16k */
738 u32 max_sq_desc_sz
; /* 64 */
739 u32 max_rq_desc_sz
; /* 64 */
741 int max_qp_init_rdma
;
742 int max_qp_dest_rdma
;
750 int num_aeq_vectors
; /* 1 */
751 int num_comp_vectors
;
752 int num_other_vectors
;
802 u32 srqwqe_buf_pg_sz
;
813 u32 chunk_sz
; /* chunk size in non multihop mode*/
817 struct hns_roce_work
{
818 struct hns_roce_dev
*hr_dev
;
819 struct work_struct work
;
827 int (*reset
)(struct hns_roce_dev
*hr_dev
, bool enable
);
828 int (*cmq_init
)(struct hns_roce_dev
*hr_dev
);
829 void (*cmq_exit
)(struct hns_roce_dev
*hr_dev
);
830 int (*hw_profile
)(struct hns_roce_dev
*hr_dev
);
831 int (*hw_init
)(struct hns_roce_dev
*hr_dev
);
832 void (*hw_exit
)(struct hns_roce_dev
*hr_dev
);
833 int (*post_mbox
)(struct hns_roce_dev
*hr_dev
, u64 in_param
,
834 u64 out_param
, u32 in_modifier
, u8 op_modifier
, u16 op
,
835 u16 token
, int event
);
836 int (*chk_mbox
)(struct hns_roce_dev
*hr_dev
, unsigned long timeout
);
837 int (*set_gid
)(struct hns_roce_dev
*hr_dev
, u8 port
, int gid_index
,
838 const union ib_gid
*gid
, const struct ib_gid_attr
*attr
);
839 int (*set_mac
)(struct hns_roce_dev
*hr_dev
, u8 phy_port
, u8
*addr
);
840 void (*set_mtu
)(struct hns_roce_dev
*hr_dev
, u8 phy_port
,
842 int (*write_mtpt
)(void *mb_buf
, struct hns_roce_mr
*mr
,
843 unsigned long mtpt_idx
);
844 int (*rereg_write_mtpt
)(struct hns_roce_dev
*hr_dev
,
845 struct hns_roce_mr
*mr
, int flags
, u32 pdn
,
846 int mr_access_flags
, u64 iova
, u64 size
,
848 int (*frmr_write_mtpt
)(void *mb_buf
, struct hns_roce_mr
*mr
);
849 int (*mw_write_mtpt
)(void *mb_buf
, struct hns_roce_mw
*mw
);
850 void (*write_cqc
)(struct hns_roce_dev
*hr_dev
,
851 struct hns_roce_cq
*hr_cq
, void *mb_buf
, u64
*mtts
,
852 dma_addr_t dma_handle
, int nent
, u32 vector
);
853 int (*set_hem
)(struct hns_roce_dev
*hr_dev
,
854 struct hns_roce_hem_table
*table
, int obj
, int step_idx
);
855 int (*clear_hem
)(struct hns_roce_dev
*hr_dev
,
856 struct hns_roce_hem_table
*table
, int obj
,
858 int (*query_qp
)(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
,
859 int qp_attr_mask
, struct ib_qp_init_attr
*qp_init_attr
);
860 int (*modify_qp
)(struct ib_qp
*ibqp
, const struct ib_qp_attr
*attr
,
861 int attr_mask
, enum ib_qp_state cur_state
,
862 enum ib_qp_state new_state
);
863 int (*destroy_qp
)(struct ib_qp
*ibqp
);
864 int (*post_send
)(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
865 const struct ib_send_wr
**bad_wr
);
866 int (*post_recv
)(struct ib_qp
*qp
, const struct ib_recv_wr
*recv_wr
,
867 const struct ib_recv_wr
**bad_recv_wr
);
868 int (*req_notify_cq
)(struct ib_cq
*ibcq
, enum ib_cq_notify_flags flags
);
869 int (*poll_cq
)(struct ib_cq
*ibcq
, int num_entries
, struct ib_wc
*wc
);
870 int (*dereg_mr
)(struct hns_roce_dev
*hr_dev
, struct hns_roce_mr
*mr
);
871 int (*destroy_cq
)(struct ib_cq
*ibcq
);
872 int (*modify_cq
)(struct ib_cq
*cq
, u16 cq_count
, u16 cq_period
);
873 int (*init_eq
)(struct hns_roce_dev
*hr_dev
);
874 void (*cleanup_eq
)(struct hns_roce_dev
*hr_dev
);
875 void (*write_srqc
)(struct hns_roce_dev
*hr_dev
,
876 struct hns_roce_srq
*srq
, u32 pdn
, u16 xrcd
, u32 cqn
,
877 void *mb_buf
, u64
*mtts_wqe
, u64
*mtts_idx
,
878 dma_addr_t dma_handle_wqe
,
879 dma_addr_t dma_handle_idx
);
880 int (*modify_srq
)(struct ib_srq
*ibsrq
, struct ib_srq_attr
*srq_attr
,
881 enum ib_srq_attr_mask srq_attr_mask
,
882 struct ib_udata
*udata
);
883 int (*query_srq
)(struct ib_srq
*ibsrq
, struct ib_srq_attr
*attr
);
884 int (*post_srq_recv
)(struct ib_srq
*ibsrq
, const struct ib_recv_wr
*wr
,
885 const struct ib_recv_wr
**bad_wr
);
886 const struct ib_device_ops
*hns_roce_dev_ops
;
887 const struct ib_device_ops
*hns_roce_dev_srq_ops
;
890 struct hns_roce_dev
{
891 struct ib_device ib_dev
;
892 struct platform_device
*pdev
;
893 struct pci_dev
*pci_dev
;
895 struct hns_roce_uar priv_uar
;
896 const char *irq_names
[HNS_ROCE_MAX_IRQ_NUM
];
898 spinlock_t bt_cmd_lock
;
901 struct hns_roce_ib_iboe iboe
;
903 struct list_head pgdir_list
;
904 struct mutex pgdir_mutex
;
905 int irq
[HNS_ROCE_MAX_IRQ_NUM
];
906 u8 __iomem
*reg_base
;
907 struct hns_roce_caps caps
;
908 struct radix_tree_root qp_table_tree
;
910 unsigned char dev_addr
[HNS_ROCE_MAX_PORTS
][MAC_ADDR_OCTET_NUM
];
915 void __iomem
*priv_addr
;
917 struct hns_roce_cmdq cmd
;
918 struct hns_roce_bitmap pd_bitmap
;
919 struct hns_roce_uar_table uar_table
;
920 struct hns_roce_mr_table mr_table
;
921 struct hns_roce_cq_table cq_table
;
922 struct hns_roce_srq_table srq_table
;
923 struct hns_roce_qp_table qp_table
;
924 struct hns_roce_eq_table eq_table
;
930 dma_addr_t tptr_dma_addr
; /*only for hw v1*/
931 u32 tptr_size
; /*only for hw v1*/
932 const struct hns_roce_hw
*hw
;
934 struct workqueue_struct
*irq_workq
;
937 static inline struct hns_roce_dev
*to_hr_dev(struct ib_device
*ib_dev
)
939 return container_of(ib_dev
, struct hns_roce_dev
, ib_dev
);
942 static inline struct hns_roce_ucontext
943 *to_hr_ucontext(struct ib_ucontext
*ibucontext
)
945 return container_of(ibucontext
, struct hns_roce_ucontext
, ibucontext
);
948 static inline struct hns_roce_pd
*to_hr_pd(struct ib_pd
*ibpd
)
950 return container_of(ibpd
, struct hns_roce_pd
, ibpd
);
953 static inline struct hns_roce_ah
*to_hr_ah(struct ib_ah
*ibah
)
955 return container_of(ibah
, struct hns_roce_ah
, ibah
);
958 static inline struct hns_roce_mr
*to_hr_mr(struct ib_mr
*ibmr
)
960 return container_of(ibmr
, struct hns_roce_mr
, ibmr
);
963 static inline struct hns_roce_mw
*to_hr_mw(struct ib_mw
*ibmw
)
965 return container_of(ibmw
, struct hns_roce_mw
, ibmw
);
968 static inline struct hns_roce_qp
*to_hr_qp(struct ib_qp
*ibqp
)
970 return container_of(ibqp
, struct hns_roce_qp
, ibqp
);
973 static inline struct hns_roce_cq
*to_hr_cq(struct ib_cq
*ib_cq
)
975 return container_of(ib_cq
, struct hns_roce_cq
, ib_cq
);
978 static inline struct hns_roce_srq
*to_hr_srq(struct ib_srq
*ibsrq
)
980 return container_of(ibsrq
, struct hns_roce_srq
, ibsrq
);
983 static inline struct hns_roce_sqp
*hr_to_hr_sqp(struct hns_roce_qp
*hr_qp
)
985 return container_of(hr_qp
, struct hns_roce_sqp
, hr_qp
);
988 static inline void hns_roce_write64_k(__le32 val
[2], void __iomem
*dest
)
990 __raw_writeq(*(u64
*) val
, dest
);
993 static inline struct hns_roce_qp
994 *__hns_roce_qp_lookup(struct hns_roce_dev
*hr_dev
, u32 qpn
)
996 return radix_tree_lookup(&hr_dev
->qp_table_tree
,
997 qpn
& (hr_dev
->caps
.num_qps
- 1));
1000 static inline void *hns_roce_buf_offset(struct hns_roce_buf
*buf
, int offset
)
1002 u32 page_size
= 1 << buf
->page_shift
;
1004 if (buf
->nbufs
== 1)
1005 return (char *)(buf
->direct
.buf
) + offset
;
1007 return (char *)(buf
->page_list
[offset
>> buf
->page_shift
].buf
) +
1008 (offset
& (page_size
- 1));
1011 int hns_roce_init_uar_table(struct hns_roce_dev
*dev
);
1012 int hns_roce_uar_alloc(struct hns_roce_dev
*dev
, struct hns_roce_uar
*uar
);
1013 void hns_roce_uar_free(struct hns_roce_dev
*dev
, struct hns_roce_uar
*uar
);
1014 void hns_roce_cleanup_uar_table(struct hns_roce_dev
*dev
);
1016 int hns_roce_cmd_init(struct hns_roce_dev
*hr_dev
);
1017 void hns_roce_cmd_cleanup(struct hns_roce_dev
*hr_dev
);
1018 void hns_roce_cmd_event(struct hns_roce_dev
*hr_dev
, u16 token
, u8 status
,
1020 int hns_roce_cmd_use_events(struct hns_roce_dev
*hr_dev
);
1021 void hns_roce_cmd_use_polling(struct hns_roce_dev
*hr_dev
);
1023 int hns_roce_mtt_init(struct hns_roce_dev
*hr_dev
, int npages
, int page_shift
,
1024 struct hns_roce_mtt
*mtt
);
1025 void hns_roce_mtt_cleanup(struct hns_roce_dev
*hr_dev
,
1026 struct hns_roce_mtt
*mtt
);
1027 int hns_roce_buf_write_mtt(struct hns_roce_dev
*hr_dev
,
1028 struct hns_roce_mtt
*mtt
, struct hns_roce_buf
*buf
);
1030 int hns_roce_init_pd_table(struct hns_roce_dev
*hr_dev
);
1031 int hns_roce_init_mr_table(struct hns_roce_dev
*hr_dev
);
1032 int hns_roce_init_eq_table(struct hns_roce_dev
*hr_dev
);
1033 int hns_roce_init_cq_table(struct hns_roce_dev
*hr_dev
);
1034 int hns_roce_init_qp_table(struct hns_roce_dev
*hr_dev
);
1035 int hns_roce_init_srq_table(struct hns_roce_dev
*hr_dev
);
1037 void hns_roce_cleanup_pd_table(struct hns_roce_dev
*hr_dev
);
1038 void hns_roce_cleanup_mr_table(struct hns_roce_dev
*hr_dev
);
1039 void hns_roce_cleanup_eq_table(struct hns_roce_dev
*hr_dev
);
1040 void hns_roce_cleanup_cq_table(struct hns_roce_dev
*hr_dev
);
1041 void hns_roce_cleanup_qp_table(struct hns_roce_dev
*hr_dev
);
1042 void hns_roce_cleanup_srq_table(struct hns_roce_dev
*hr_dev
);
1044 int hns_roce_bitmap_alloc(struct hns_roce_bitmap
*bitmap
, unsigned long *obj
);
1045 void hns_roce_bitmap_free(struct hns_roce_bitmap
*bitmap
, unsigned long obj
,
1047 int hns_roce_bitmap_init(struct hns_roce_bitmap
*bitmap
, u32 num
, u32 mask
,
1048 u32 reserved_bot
, u32 resetrved_top
);
1049 void hns_roce_bitmap_cleanup(struct hns_roce_bitmap
*bitmap
);
1050 void hns_roce_cleanup_bitmap(struct hns_roce_dev
*hr_dev
);
1051 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap
*bitmap
, int cnt
,
1052 int align
, unsigned long *obj
);
1053 void hns_roce_bitmap_free_range(struct hns_roce_bitmap
*bitmap
,
1054 unsigned long obj
, int cnt
,
1057 struct ib_ah
*hns_roce_create_ah(struct ib_pd
*pd
,
1058 struct rdma_ah_attr
*ah_attr
,
1060 struct ib_udata
*udata
);
1061 int hns_roce_query_ah(struct ib_ah
*ibah
, struct rdma_ah_attr
*ah_attr
);
1062 int hns_roce_destroy_ah(struct ib_ah
*ah
, u32 flags
);
1064 struct ib_pd
*hns_roce_alloc_pd(struct ib_device
*ib_dev
,
1065 struct ib_ucontext
*context
,
1066 struct ib_udata
*udata
);
1067 int hns_roce_dealloc_pd(struct ib_pd
*pd
);
1069 struct ib_mr
*hns_roce_get_dma_mr(struct ib_pd
*pd
, int acc
);
1070 struct ib_mr
*hns_roce_reg_user_mr(struct ib_pd
*pd
, u64 start
, u64 length
,
1071 u64 virt_addr
, int access_flags
,
1072 struct ib_udata
*udata
);
1073 int hns_roce_rereg_user_mr(struct ib_mr
*mr
, int flags
, u64 start
, u64 length
,
1074 u64 virt_addr
, int mr_access_flags
, struct ib_pd
*pd
,
1075 struct ib_udata
*udata
);
1076 struct ib_mr
*hns_roce_alloc_mr(struct ib_pd
*pd
, enum ib_mr_type mr_type
,
1078 int hns_roce_map_mr_sg(struct ib_mr
*ibmr
, struct scatterlist
*sg
, int sg_nents
,
1079 unsigned int *sg_offset
);
1080 int hns_roce_dereg_mr(struct ib_mr
*ibmr
);
1081 int hns_roce_hw2sw_mpt(struct hns_roce_dev
*hr_dev
,
1082 struct hns_roce_cmd_mailbox
*mailbox
,
1083 unsigned long mpt_index
);
1084 unsigned long key_to_hw_index(u32 key
);
1086 struct ib_mw
*hns_roce_alloc_mw(struct ib_pd
*pd
, enum ib_mw_type
,
1087 struct ib_udata
*udata
);
1088 int hns_roce_dealloc_mw(struct ib_mw
*ibmw
);
1090 void hns_roce_buf_free(struct hns_roce_dev
*hr_dev
, u32 size
,
1091 struct hns_roce_buf
*buf
);
1092 int hns_roce_buf_alloc(struct hns_roce_dev
*hr_dev
, u32 size
, u32 max_direct
,
1093 struct hns_roce_buf
*buf
, u32 page_shift
);
1095 int hns_roce_ib_umem_write_mtt(struct hns_roce_dev
*hr_dev
,
1096 struct hns_roce_mtt
*mtt
, struct ib_umem
*umem
);
1098 struct ib_srq
*hns_roce_create_srq(struct ib_pd
*pd
,
1099 struct ib_srq_init_attr
*srq_init_attr
,
1100 struct ib_udata
*udata
);
1101 int hns_roce_modify_srq(struct ib_srq
*ibsrq
, struct ib_srq_attr
*srq_attr
,
1102 enum ib_srq_attr_mask srq_attr_mask
,
1103 struct ib_udata
*udata
);
1104 int hns_roce_destroy_srq(struct ib_srq
*ibsrq
);
1106 struct ib_qp
*hns_roce_create_qp(struct ib_pd
*ib_pd
,
1107 struct ib_qp_init_attr
*init_attr
,
1108 struct ib_udata
*udata
);
1109 int hns_roce_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
1110 int attr_mask
, struct ib_udata
*udata
);
1111 void *get_recv_wqe(struct hns_roce_qp
*hr_qp
, int n
);
1112 void *get_send_wqe(struct hns_roce_qp
*hr_qp
, int n
);
1113 void *get_send_extend_sge(struct hns_roce_qp
*hr_qp
, int n
);
1114 bool hns_roce_wq_overflow(struct hns_roce_wq
*hr_wq
, int nreq
,
1115 struct ib_cq
*ib_cq
);
1116 enum hns_roce_qp_state
to_hns_roce_state(enum ib_qp_state state
);
1117 void hns_roce_lock_cqs(struct hns_roce_cq
*send_cq
,
1118 struct hns_roce_cq
*recv_cq
);
1119 void hns_roce_unlock_cqs(struct hns_roce_cq
*send_cq
,
1120 struct hns_roce_cq
*recv_cq
);
1121 void hns_roce_qp_remove(struct hns_roce_dev
*hr_dev
, struct hns_roce_qp
*hr_qp
);
1122 void hns_roce_qp_free(struct hns_roce_dev
*hr_dev
, struct hns_roce_qp
*hr_qp
);
1123 void hns_roce_release_range_qp(struct hns_roce_dev
*hr_dev
, int base_qpn
,
1125 __be32
send_ieth(const struct ib_send_wr
*wr
);
1126 int to_hr_qp_type(int qp_type
);
1128 struct ib_cq
*hns_roce_ib_create_cq(struct ib_device
*ib_dev
,
1129 const struct ib_cq_init_attr
*attr
,
1130 struct ib_ucontext
*context
,
1131 struct ib_udata
*udata
);
1133 int hns_roce_ib_destroy_cq(struct ib_cq
*ib_cq
);
1134 void hns_roce_free_cq(struct hns_roce_dev
*hr_dev
, struct hns_roce_cq
*hr_cq
);
1136 int hns_roce_db_map_user(struct hns_roce_ucontext
*context
, unsigned long virt
,
1137 struct hns_roce_db
*db
);
1138 void hns_roce_db_unmap_user(struct hns_roce_ucontext
*context
,
1139 struct hns_roce_db
*db
);
1140 int hns_roce_alloc_db(struct hns_roce_dev
*hr_dev
, struct hns_roce_db
*db
,
1142 void hns_roce_free_db(struct hns_roce_dev
*hr_dev
, struct hns_roce_db
*db
);
1144 void hns_roce_cq_completion(struct hns_roce_dev
*hr_dev
, u32 cqn
);
1145 void hns_roce_cq_event(struct hns_roce_dev
*hr_dev
, u32 cqn
, int event_type
);
1146 void hns_roce_qp_event(struct hns_roce_dev
*hr_dev
, u32 qpn
, int event_type
);
1147 void hns_roce_srq_event(struct hns_roce_dev
*hr_dev
, u32 srqn
, int event_type
);
1148 int hns_get_gid_index(struct hns_roce_dev
*hr_dev
, u8 port
, int gid_index
);
1149 int hns_roce_init(struct hns_roce_dev
*hr_dev
);
1150 void hns_roce_exit(struct hns_roce_dev
*hr_dev
);
1152 #endif /* _HNS_ROCE_DEVICE_H */