/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
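
/*
 * Usage sketch for field_avail() (illustrative only; the struct and
 * field names below are hypothetical): before reading a trailing field
 * of a user-supplied command struct, check that userspace actually
 * passed enough bytes to contain it, so newer kernels stay compatible
 * with older, shorter userspace structs:
 *
 *	if (field_avail(struct hypothetical_ucmd, new_field, udata->inlen))
 *		val = ucmd.new_field;
 */
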
#define MLX5_IB_DEFAULT_UIDX		0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK	__mlx5_mask(qpc, user_index)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
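
/*
 * Sketch of one plausible decoding implied by the mask/shift pair above
 * (an added illustration, not a restatement of the driver's mmap
 * handler, which is the authority here): the command occupies the eight
 * bits above MLX5_IB_MMAP_CMD_SHIFT of vm_pgoff and the argument (e.g.
 * a UAR page index) sits below them:
 *
 *	command = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *		  MLX5_IB_MMAP_CMD_MASK;
 *	index	= vma->vm_pgoff & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
 */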

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR		= 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_IB_FLOW_MCAST_PRIO - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	unsigned int			prio;
	struct mlx5_flow_rule		*rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	/* Protect the flow steering bypass flow tables when flow
	 * rules are added or removed; only a single add or removal
	 * of a flow steering rule can be in progress at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver.
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
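
/*
 * Added note (based on the flag definitions in linux/mlx5/driver.h):
 * the conversion above works because MLX5_PFAULT_REQUESTOR and
 * MLX5_PFAULT_WRITE occupy the two low flag bits, so masking them out
 * yields exactly the enum values:
 *
 *	0                                   -> RESPONDER_READ  (0)
 *	MLX5_PFAULT_REQUESTOR               -> REQUESTOR_READ  (1)
 *	MLX5_PFAULT_WRITE                   -> RESPONDER_WRITE (2)
 *	MLX5_PFAULT_REQUESTOR | _WRITE      -> REQUESTOR_WRITE (3)
 */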

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			fm_cache;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
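	/*
	 * Sketch of the locking pattern this enables (an added
	 * illustration; the real handling lives in odp.c): the fault
	 * path checks the flag and schedules work atomically, e.g.
	 *
	 *	spin_lock(&qp->disable_page_faults_lock);
	 *	if (!qp->disable_page_faults)
	 *		queue_work(mlx5_ib_page_fault_wq, &pfault->work);
	 *	spin_unlock(&qp->disable_page_faults_lock);
	 */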
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 1,
	MLX5_IB_QP_CROSS_CHANNEL		= 1 << 2,
	MLX5_IB_QP_MANAGED_SEND			= 1 << 3,
	MLX5_IB_QP_MANAGED_RECV			= 1 << 4,
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	u32			create_flags;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void			*descs_alloc;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->status = -1;
	init_completion(&context->done);
}
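
/*
 * Usage sketch (an illustrative outline, not a verbatim copy of the
 * driver's UMR path in mr.c): the poster initializes the context,
 * points the work request's wr_id at it, then sleeps until the CQ
 * handler records the completion status and signals 'done':
 *
 *	struct mlx5_ib_umr_context umr_context;
 *
 *	mlx5_ib_init_umr_context(&umr_context);
 *	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
 *	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 *	if (!err) {
 *		wait_for_completion(&umr_context.done);
 *		if (umr_context.status != IB_WC_SUCCESS)
 *			err = -EFAULT;
 *	}
 */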

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

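/*
 * Added note: a core QP is embedded in a mlx5_ib_qp_base, and a
 * mlx5_ib_qp may own more than one base (see mlx5_ib_raw_packet_qp),
 * so a plain container_of() cannot recover the owning QP; the base's
 * container_mibqp back-pointer is followed instead.
 */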
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
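
/*
 * Usage sketch (illustrative; the exact error code is the CQ create
 * path's choice): reject user-requested flags this driver does not
 * support:
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return ERR_PTR(-EOPNOTSUPP);
 */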

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
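
/*
 * Usage sketch (illustrative; 'ucmd.uidx' is a hypothetical
 * user-command field): a create path validates a user-assigned index
 * against the context's CQE version before handing it to firmware:
 *
 *	err = verify_assign_uidx(ucontext->cqe_version, ucmd.uidx, &uidx);
 *	if (err)
 *		return err;
 */
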
#endif /* MLX5_IB_H */