/*
 * Source: git.proxmox.com — mirror_ubuntu-bionic-kernel.git
 * Commit subject: "{net,IB}/mlx5: Modify QP commands via mlx5 ifc"
 * File: include/linux/mlx5/qp.h
 */
1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #ifndef MLX5_QP_H
34 #define MLX5_QP_H
35
36 #include <linux/mlx5/device.h>
37 #include <linux/mlx5/driver.h>
38
/* Reserved lkey value used as an "invalid memory key" marker. */
#define MLX5_INVALID_LKEY	0x100
/* A signature WQE spans five send-WQE basic blocks (5 * 64 bytes). */
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
/* Size in bytes of one DIF (Data Integrity Field) tuple. */
#define MLX5_DIF_SIZE		8
/* 'op' value for stride-block descriptors (see mlx5_stride_block_ctrl_seg). */
#define MLX5_STRIDE_BLOCK_OP	0x400
/* Sub-masks of a DIF copy byte mask: guard / application / reference tag. */
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
/* Flag bits used when building a struct mlx5_bsf / struct mlx5_bsf_inl. */
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2

/* QP numbers are 24 bits wide. */
#define MLX5_QPN_BITS		24
#define MLX5_QPN_MASK		((1 << MLX5_QPN_BITS) - 1)
55
/*
 * Optional-parameter mask bits for the modify-QP command: each bit selects
 * an optional QPC field that the state transition should apply.
 * NOTE(review): bits 11, 15 and 17 are not defined here — confirm against
 * the device programming manual before reusing them.
 */
enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};
77
/*
 * QP states as encoded in firmware commands.  Value 8 is unused by this
 * encoding.  The last three entries (10, 11, 12) are software-only
 * markers: NUM_STATE is a count/sentinel, STATE and STATE_BAD are used by
 * driver state machines, not by the device.
 */
enum mlx5_qp_state {
	MLX5_QP_STATE_RST			= 0,
	MLX5_QP_STATE_INIT			= 1,
	MLX5_QP_STATE_RTR			= 2,
	MLX5_QP_STATE_RTS			= 3,
	MLX5_QP_STATE_SQER			= 4,
	MLX5_QP_STATE_SQD			= 5,
	MLX5_QP_STATE_ERR			= 6,
	MLX5_QP_STATE_SQ_DRAINING		= 7,
	MLX5_QP_STATE_SUSPENDED			= 9,
	MLX5_QP_NUM_STATE,
	MLX5_QP_STATE,
	MLX5_QP_STATE_BAD,
};
92
/*
 * Software-only "not available" states for SQs and RQs, placed one past
 * the last firmware-defined state (MLX5_SQC_STATE_ERR / MLX5_RQC_STATE_ERR
 * from the mlx5 ifc definitions); *_NUM_STATE is the total state count
 * including the NA entry.
 */
enum {
	MLX5_SQ_STATE_NA	= MLX5_SQC_STATE_ERR + 1,
	MLX5_SQ_NUM_STATE	= MLX5_SQ_STATE_NA + 1,
	MLX5_RQ_STATE_NA	= MLX5_RQC_STATE_ERR + 1,
	MLX5_RQ_NUM_STATE	= MLX5_RQ_STATE_NA + 1,
};
99
/*
 * QP transport/service types.  Note the last three entries are listed out
 * of numeric order (REG_UMR = 0xc, PTP_1588 = 0xd, SYNC_UMR = 0xe);
 * MLX5_QP_ST_MAX relies only on SYNC_UMR (0xe) being the largest listed
 * value, so it evaluates to 0xf.
 */
enum {
	MLX5_QP_ST_RC			= 0x0,
	MLX5_QP_ST_UC			= 0x1,
	MLX5_QP_ST_UD			= 0x2,
	MLX5_QP_ST_XRC			= 0x3,
	MLX5_QP_ST_MLX			= 0x4,
	MLX5_QP_ST_DCI			= 0x5,
	MLX5_QP_ST_DCT			= 0x6,
	MLX5_QP_ST_QP0			= 0x7,
	MLX5_QP_ST_QP1			= 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE	= 0x9,
	MLX5_QP_ST_RAW_IPV6		= 0xa,
	MLX5_QP_ST_SNIFFER		= 0xb,
	MLX5_QP_ST_SYNC_UMR		= 0xe,
	MLX5_QP_ST_PTP_1588		= 0xd,
	MLX5_QP_ST_REG_UMR		= 0xc,
	MLX5_QP_ST_MAX
};
118
/* Path-migration states (selected via MLX5_QP_OPTPAR_PM_STATE). */
enum {
	MLX5_QP_PM_MIGRATED	= 0x3,
	MLX5_QP_PM_ARMED	= 0x0,
	MLX5_QP_PM_REARM	= 0x1
};
124
/* Receive-queue types (encoded in mlx5_qp_context.rq_type_srqn). */
enum {
	MLX5_NON_ZERO_RQ	= 0x0,
	MLX5_SRQ_RQ		= 0x1,
	MLX5_CRQ_RQ		= 0x2,
	MLX5_ZERO_LEN_RQ	= 0x3
};
131
/* TODO REM */
/* Bit definitions for the params1/params2 words of struct mlx5_qp_context. */
enum {
	/* params1 */
	MLX5_QP_BIT_SRE		= 1 << 15,
	MLX5_QP_BIT_SWE		= 1 << 14,
	MLX5_QP_BIT_SAE		= 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE		= 1 << 15,
	MLX5_QP_BIT_RWE		= 1 << 14,
	MLX5_QP_BIT_RAE		= 1 << 13,
	MLX5_QP_BIT_RIC		= 1 << 4,
	MLX5_QP_BIT_CC_SLAVE_RECV	= 1 << 2,
	MLX5_QP_BIT_CC_SLAVE_SEND	= 1 << 1,
	MLX5_QP_BIT_CC_MASTER	= 1 << 0
};
147
/*
 * Flag values OR-ed into mlx5_wqe_ctrl_seg.fm_ce_se — presumably
 * completion-event and solicited-event request bits; confirm exact
 * semantics against the device spec.
 */
enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};
153
enum {
	MLX5_SEND_WQE_DS	= 16,	/* size of one WQE data segment, bytes */
	MLX5_SEND_WQE_BB	= 64,	/* send WQE basic-block size, bytes */
};

/* Number of 16-byte data segments per 64-byte basic block (= 4). */
#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)

enum {
	/* Maximum number of basic blocks a single send WQE may span. */
	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
};
164
/*
 * Access-permission bits for mlx5_wqe_fmr_seg.flags.
 * NOTE(review): (1 << 31) on a signed int is formally undefined in ISO C;
 * kernel builds accept it, but (1u << 31) would be cleaner.
 */
enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31
};
172
/* Fence modes, pre-shifted into bits 5.. of the control segment's fm_ce_se. */
enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_FENCE			= 2 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};
180
/* Indices into a QP's doorbell-record pair: receive and send counters. */
enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};

/* Misc WQE flag bits — exact consumer not visible in this header. */
enum {
	MLX5_FLAGS_INLINE	= 1<<7,
	MLX5_FLAGS_CHECK_FREE   = 1<<5,
};
190
/*
 * Fast-memory-registration WQE segment.  Big-endian fields mirror the
 * device format — do not reorder or resize members.
 */
struct mlx5_wqe_fmr_seg {
	__be32			flags;		/* MLX5_WQE_FMR_PERM_* bits */
	__be32			mem_key;
	__be64			buf_list;
	__be64			start_addr;
	__be64			reg_len;
	__be32			offset;
	__be32			page_size;
	u32			reserved[2];
};
201
/*
 * Send WQE control segment (16 bytes, device format).  The packing of
 * opmod_idx_opcode and qpn_ds is described by the MLX5_WQE_CTRL_* masks
 * defined in this header.
 */
struct mlx5_wqe_ctrl_seg {
	__be32			opmod_idx_opcode;	/* opmod | wqe index | opcode */
	__be32			qpn_ds;			/* QPN | DS count (16B units) */
	u8			signature;
	u8			rsvd[2];
	u8			fm_ce_se;	/* fence / completion / solicited flags */
	__be32			imm;		/* immediate data */
};
210
/* Field extraction masks/shifts for struct mlx5_wqe_ctrl_seg. */
#define MLX5_WQE_CTRL_DS_MASK		0x3f		/* low 6 bits of qpn_ds */
#define MLX5_WQE_CTRL_QPN_MASK		0xffffff00	/* QPN in bits 8..31 */
#define MLX5_WQE_CTRL_QPN_SHIFT		8
#define MLX5_WQE_DS_UNITS		16		/* one DS = 16 bytes */
#define MLX5_WQE_CTRL_OPCODE_MASK	0xff		/* opcode in low byte */
#define MLX5_WQE_CTRL_WQE_INDEX_MASK	0x00ffff00	/* WQE index, bits 8..23 */
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT	8

/* Checksum-offload request bits for mlx5_wqe_eth_seg.cs_flags. */
enum {
	MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM      = 1 << 5,
	MLX5_ETH_WQE_L3_CSUM            = 1 << 6,
	MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
};
225
/*
 * Ethernet send WQE segment.  inline_hdr_start holds the first bytes of
 * inline packet headers; the remainder presumably continues in the
 * following WQE space — confirm with the WQE builder in the driver.
 */
struct mlx5_wqe_eth_seg {
	u8              rsvd0[4];
	u8              cs_flags;	/* MLX5_ETH_WQE_*_CSUM bits */
	u8              rsvd1;
	__be16          mss;		/* MSS for TSO */
	__be32          rsvd2;
	__be16          inline_hdr_sz;	/* total inline header length */
	u8              inline_hdr_start[2];
};
235
/* XRC send WQE segment: target XRC SRQ number. */
struct mlx5_wqe_xrc_seg {
	__be32			xrc_srqn;
	u8			rsvd[12];
};

/* Masked atomic (compare-and-swap / fetch-and-add) operand segment. */
struct mlx5_wqe_masked_atomic_seg {
	__be64			swap_add;
	__be64			compare;
	__be64			swap_add_mask;
	__be64			compare_mask;
};
247
/*
 * Address vector: describes the remote path for a datagram/DC send —
 * IB fields (rlid, rgid, mgid) and RoCE fields (rmac, udp_sport) overlap
 * via the unions.  Device layout — do not reorder.
 */
struct mlx5_av {
	union {
		struct {
			__be32	qkey;
			__be32	reserved;
		} qkey;
		__be64	dc_key;		/* DC access key for DC transports */
	} key;
	__be32	dqp_dct;		/* destination QP or DCT number */
	u8	stat_rate_sl;		/* static rate + service level */
	u8	fl_mlid;		/* force-loopback flag + source MLID */
	union {
		__be16	rlid;		/* remote LID (IB) */
		__be16	udp_sport;	/* UDP source port (RoCE v2) */
	};
	u8	reserved0[4];
	u8	rmac[6];		/* remote MAC (RoCE) */
	u8	tclass;
	u8	hop_limit;
	__be32	grh_gid_fl;		/* GRH present + GID index + flow label */
	u8	rgid[16];		/* remote GID */
};
270
/* Datagram WQE segment: just the address vector. */
struct mlx5_wqe_datagram_seg {
	struct mlx5_av	av;
};

/* RDMA remote-address segment: target VA + rkey. */
struct mlx5_wqe_raddr_seg {
	__be64			raddr;
	__be32			rkey;
	u32			reserved;
};

/* Atomic operand segment (compare-and-swap / fetch-and-add). */
struct mlx5_wqe_atomic_seg {
	__be64			swap_add;
	__be64			compare;
};

/* Scatter/gather data segment: byte count, lkey, local address. */
struct mlx5_wqe_data_seg {
	__be32			byte_count;
	__be32			lkey;
	__be64			addr;
};
291
/*
 * UMR (user-mode memory registration) WQE control segment.
 * klm/bsf sizes are expressed in 16-byte octowords; mkey_mask selects
 * which mkey attributes the UMR updates.
 */
struct mlx5_wqe_umr_ctrl_seg {
	u8		flags;
	u8		rsvd0[3];
	__be16		klm_octowords;
	__be16		bsf_octowords;
	__be64		mkey_mask;
	u8		rsvd1[32];
};
300
/* SET_PSV WQE segment: program a PSV (protection signature value). */
struct mlx5_seg_set_psv {
	__be32		psv_num;
	__be16		syndrome;
	__be16		status;
	__be32		transient_sig;
	__be32		ref_tag;
};

/* GET_PSV WQE segment: read back up to four PSV indices into [l_key, va]. */
struct mlx5_seg_get_psv {
	u8		rsvd[19];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

/* CHECK_PSV WQE segment: error-coalescing / transport-error checks on PSVs. */
struct mlx5_seg_check_psv {
	u8		rsvd0[2];
	__be16		err_coalescing_op;
	u8		rsvd1[2];
	__be16		xport_err_op;
	u8		rsvd2[2];
	__be16		xport_err_mask;
	u8		rsvd3[7];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};
330
/* Receive WQE signature block (identical layout to the send-side variant). */
struct mlx5_rwqe_sig {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

/* Send WQE signature segment. */
struct mlx5_wqe_signature_seg {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

/* Byte count of an inline segment lives in the low 10 bits. */
#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK	0x3ff

/* Inline-data segment header; the data bytes follow immediately. */
struct mlx5_wqe_inline_seg {
	__be32	byte_count;
};
348
/* Signature/DIF checksum algorithm selector. */
enum mlx5_sig_type {
	MLX5_DIF_CRC = 0x1,
	MLX5_DIF_IPCS = 0x2,
};

/*
 * Inline signature (DIF) domain descriptor, embedded twice in struct
 * mlx5_bsf (wire and memory domains).  vld_refresh takes the
 * MLX5_BSF_INL_VALID / MLX5_BSF_REFRESH_DIF bits.
 */
struct mlx5_bsf_inl {
	__be16		vld_refresh;
	__be16		dif_apptag;
	__be32		dif_reftag;
	u8		sig_type;	/* enum mlx5_sig_type */
	u8		rp_inv_seed;
	u8		rsvd[3];
	u8		dif_inc_ref_guard_check;
	__be16		dif_app_bitmask_check;
};
364
/*
 * BSF (signature offload descriptor): basic block, extension block, and
 * the inline wire/memory domain descriptors.  Device layout — keep field
 * order and sizes exactly as-is.
 */
struct mlx5_bsf {
	struct mlx5_bsf_basic {
		u8		bsf_size_sbs;
		u8		check_byte_mask;
		union {
			u8	copy_byte_mask;
			u8	bs_selector;
			u8	rsvd_wflags;
		} wire;
		union {
			u8	bs_selector;
			u8	rsvd_mflags;
		} mem;
		__be32		raw_data_size;
		__be32		w_bfs_psv;	/* wire-domain BFS/PSV */
		__be32		m_bfs_psv;	/* memory-domain BFS/PSV */
	} basic;
	struct mlx5_bsf_ext {
		__be32		t_init_gen_pro_size;
		__be32		rsvd_epi_size;
		__be32		w_tfs_psv;
		__be32		m_tfs_psv;
	} ext;
	struct mlx5_bsf_inl	w_inl;	/* wire-domain inline descriptor */
	struct mlx5_bsf_inl	m_inl;	/* memory-domain inline descriptor */
};
391
/* KLM translation entry: byte count + key + virtual address. */
struct mlx5_klm {
	__be32		bcount;
	__be32		key;
	__be64		va;
};

/* One entry of a strided-block list: stride, count, key, address. */
struct mlx5_stride_block_entry {
	__be16		stride;
	__be16		bcount;
	__be32		key;
	__be64		va;
};

/* Control header preceding a list of mlx5_stride_block_entry items. */
struct mlx5_stride_block_ctrl_seg {
	__be32		bcount_per_cycle;
	__be32		op;		/* e.g. MLX5_STRIDE_BLOCK_OP */
	__be32		repeat_count;
	u16		rsvd;
	__be16		num_entries;
};
412
/* Qualifier flags for a reported page fault (ODP). */
enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR	= 1 << 0,	/* fault on the requestor side */
	MLX5_PFAULT_WRITE	= 1 << 1,	/* faulting access was a write */
	MLX5_PFAULT_RDMA	= 1 << 2,	/* fault during an RDMA operation */
};
418
/*
 * Contains the details of a pagefault.  Which union member is valid is
 * determined by event_subtype/flags: 'wqe' for send/receive WQE faults,
 * 'rdma' for RDMA responder faults.
 */
struct mlx5_pagefault {
	u32			bytes_committed;
	u8			event_subtype;
	enum mlx5_pagefault_flags flags;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};
};
448
/*
 * Driver-side QP object tracked in the core QP table.
 * NOTE(review): the "must be first" constraint suggests callers cast
 * between this and mlx5_core_rsc_common — keep 'common' as member 0.
 */
struct mlx5_core_qp {
	struct mlx5_core_rsc_common	common; /* must be first */
	void (*event)		(struct mlx5_core_qp *, int);	/* async-event callback */
	void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
	int			qpn;	/* QP number */
	struct mlx5_rsc_debug	*dbg;	/* debugfs state */
	int			pid;	/* owning process id */
};
457
/*
 * Address-path portion of the legacy QP context (primary or alternate
 * path).  Device layout — keep field order/sizes exactly as-is.
 */
struct mlx5_qp_path {
	u8			fl_free_ar;
	u8			rsvd3;
	__be16			pkey_index;
	u8			rsvd0;
	u8			grh_mlid;	/* GRH present + source MLID */
	__be16			rlid;		/* remote LID */
	u8			ackto_lt;	/* ack timeout */
	u8			mgid_index;	/* GID table index */
	u8			static_rate;
	u8			hop_limit;
	__be32			tclass_flowlabel;
	union {
		u8		rgid[16];	/* remote GID (IB) */
		u8		rip[16];	/* remote IP (RoCE) */
	};
	u8			f_dscp_ecn_prio;
	u8			ecn_dscp;
	__be16			udp_sport;	/* RoCE v2 UDP source port */
	u8			dci_cfi_prio_sl;
	u8			port;		/* physical port */
	u8			rmac[6];	/* remote MAC (RoCE) */
};
481
/* FIXME: use mlx5_ifc.h qpc */
/*
 * Legacy QP context layout used by the older command path (the ifc-based
 * qpc layout is intended to replace it, per the FIXME above).  Big-endian
 * device format — do not reorder or resize members.
 */
struct mlx5_qp_context {
	__be32			flags;
	__be32			flags_pd;	/* flags + protection domain */
	u8			mtu_msgmax;	/* path MTU + max message size */
	u8			rq_size_stride;
	__be16			sq_crq_size;
	__be32			qp_counter_set_usr_page;
	__be32			wire_qpn;
	__be32			log_pg_sz_remote_qpn;
	struct			mlx5_qp_path pri_path;	/* primary path */
	struct			mlx5_qp_path alt_path;	/* alternate path */
	__be32			params1;	/* MLX5_QP_BIT_S* flags */
	u8			reserved2[4];
	__be32			next_send_psn;
	__be32			cqn_send;	/* send CQ number */
	__be32			deth_sqpn;
	u8			reserved3[4];
	__be32			last_acked_psn;
	__be32			ssn;
	__be32			params2;	/* MLX5_QP_BIT_R* / CC flags */
	__be32			rnr_nextrecvpsn;
	__be32			xrcd;
	__be32			cqn_recv;	/* receive CQ number */
	__be64			db_rec_addr;	/* doorbell record address */
	__be32			qkey;
	__be32			rq_type_srqn;	/* RQ type + SRQ number */
	__be32			rmsn;
	__be16			hw_sq_wqe_counter;
	__be16			sw_sq_wqe_counter;
	__be16			hw_rcyclic_byte_counter;
	__be16			hw_rq_counter;
	__be16			sw_rcyclic_byte_counter;
	__be16			sw_rq_counter;
	u8			rsvd0[5];
	u8			cgs;
	u8			cs_req;		/* checksum request */
	u8			cs_res;		/* checksum response */
	__be64			dc_access_key;
	u8			rsvd1[24];
};
523
524 static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
525 {
526 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
527 }
528
529 static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
530 {
531 return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
532 }
533
/* QP lifecycle: create / modify / destroy / query via firmware commands
 * (implemented elsewhere in the mlx5 core driver). */
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in,
			int inlen);
/* @qpc follows the ifc qpc layout; @opt_param_mask presumably takes
 * enum mlx5_qp_optpar bits — confirm against the implementation. */
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen);

/* XRC domain allocation/release. */
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
/* QP-table setup/teardown and debugfs registration. */
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Resume a QP stalled on an ODP page fault; @error aborts instead. */
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 context, int error);
#endif
/* RQ/SQ objects tracked through the common QP resource table. */
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq);
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq);
/* Per-device queue counters (allocation, teardown, query). */
int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
			      int reset, void *out, int out_size);
int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
				  u32 *out_of_buffer);
570
571 static inline const char *mlx5_qp_type_str(int type)
572 {
573 switch (type) {
574 case MLX5_QP_ST_RC: return "RC";
575 case MLX5_QP_ST_UC: return "C";
576 case MLX5_QP_ST_UD: return "UD";
577 case MLX5_QP_ST_XRC: return "XRC";
578 case MLX5_QP_ST_MLX: return "MLX";
579 case MLX5_QP_ST_QP0: return "QP0";
580 case MLX5_QP_ST_QP1: return "QP1";
581 case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
582 case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
583 case MLX5_QP_ST_SNIFFER: return "SNIFFER";
584 case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
585 case MLX5_QP_ST_PTP_1588: return "PTP_1588";
586 case MLX5_QP_ST_REG_UMR: return "REG_UMR";
587 default: return "Invalid transport type";
588 }
589 }
590
591 static inline const char *mlx5_qp_state_str(int state)
592 {
593 switch (state) {
594 case MLX5_QP_STATE_RST:
595 return "RST";
596 case MLX5_QP_STATE_INIT:
597 return "INIT";
598 case MLX5_QP_STATE_RTR:
599 return "RTR";
600 case MLX5_QP_STATE_RTS:
601 return "RTS";
602 case MLX5_QP_STATE_SQER:
603 return "SQER";
604 case MLX5_QP_STATE_SQD:
605 return "SQD";
606 case MLX5_QP_STATE_ERR:
607 return "ERR";
608 case MLX5_QP_STATE_SQ_DRAINING:
609 return "SQ_DRAINING";
610 case MLX5_QP_STATE_SUSPENDED:
611 return "SUSPENDED";
612 default: return "Invalid QP state";
613 }
614 }
615
616 #endif /* MLX5_QP_H */