1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #ifndef MLX5_DRIVER_H
34 #define MLX5_DRIVER_H
35
36 #include <linux/kernel.h>
37 #include <linux/completion.h>
38 #include <linux/pci.h>
39 #include <linux/spinlock_types.h>
40 #include <linux/semaphore.h>
41 #include <linux/slab.h>
42 #include <linux/vmalloc.h>
43 #include <linux/radix-tree.h>
44 #include <linux/workqueue.h>
45 #include <linux/interrupt.h>
46
47 #include <linux/mlx5/device.h>
48 #include <linux/mlx5/doorbell.h>
49 #include <linux/mlx5/srq.h>
50
51 enum {
52 MLX5_BOARD_ID_LEN = 64,
53 MLX5_MAX_NAME_LEN = 16,
54 };
55
56 enum {
57	/* one minute for the sake of bringup. Generally, commands must always
58	 * complete, and we may need to increase this timeout value.
59 */
60 MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
61 MLX5_CMD_WQ_MAX_NAME = 32,
62 };
63
64 enum {
65 CMD_OWNER_SW = 0x0,
66 CMD_OWNER_HW = 0x1,
67 CMD_STATUS_SUCCESS = 0,
68 };
69
70 enum mlx5_sqp_t {
71 MLX5_SQP_SMI = 0,
72 MLX5_SQP_GSI = 1,
73 MLX5_SQP_IEEE_1588 = 2,
74 MLX5_SQP_SNIFFER = 3,
75 MLX5_SQP_SYNC_UMR = 4,
76 };
77
78 enum {
79 MLX5_MAX_PORTS = 2,
80 };
81
82 enum {
83 MLX5_EQ_VEC_PAGES = 0,
84 MLX5_EQ_VEC_CMD = 1,
85 MLX5_EQ_VEC_ASYNC = 2,
86 MLX5_EQ_VEC_COMP_BASE,
87 };
88
89 enum {
90 MLX5_MAX_IRQ_NAME = 32
91 };
92
93 enum {
94 MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
95 MLX5_ATOMIC_MODE_CX = 2 << 16,
96 MLX5_ATOMIC_MODE_8B = 3 << 16,
97 MLX5_ATOMIC_MODE_16B = 4 << 16,
98 MLX5_ATOMIC_MODE_32B = 5 << 16,
99 MLX5_ATOMIC_MODE_64B = 6 << 16,
100 MLX5_ATOMIC_MODE_128B = 7 << 16,
101 MLX5_ATOMIC_MODE_256B = 8 << 16,
102 };
103
104 enum {
105 MLX5_REG_QETCR = 0x4005,
106 MLX5_REG_QTCT = 0x400a,
107 MLX5_REG_DCBX_PARAM = 0x4020,
108 MLX5_REG_DCBX_APP = 0x4021,
109 MLX5_REG_PCAP = 0x5001,
110 MLX5_REG_PMTU = 0x5003,
111 MLX5_REG_PTYS = 0x5004,
112 MLX5_REG_PAOS = 0x5006,
113 MLX5_REG_PFCC = 0x5007,
114 MLX5_REG_PPCNT = 0x5008,
115 MLX5_REG_PMAOS = 0x5012,
116 MLX5_REG_PUDE = 0x5009,
117 MLX5_REG_PMPE = 0x5010,
118 MLX5_REG_PELC = 0x500e,
119 MLX5_REG_PVLC = 0x500f,
120 MLX5_REG_PCMR = 0x5041,
121 MLX5_REG_PMLP = 0x5002,
122 MLX5_REG_NODE_DESC = 0x6001,
123 MLX5_REG_HOST_ENDIANNESS = 0x7004,
124 MLX5_REG_MCIA = 0x9014,
125 MLX5_REG_MLCR = 0x902b,
126 MLX5_REG_MPCNT = 0x9051,
127 };
128
129 enum mlx5_dcbx_oper_mode {
130 MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
131 MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
132 };
133
134 enum {
135 MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
136 MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
137 };
138
139 enum mlx5_page_fault_resume_flags {
140 MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
141 MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
142 MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
143 MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
144 };
145
146 enum dbg_rsc_type {
147 MLX5_DBG_RSC_QP,
148 MLX5_DBG_RSC_EQ,
149 MLX5_DBG_RSC_CQ,
150 };
151
152 struct mlx5_field_desc {
153 struct dentry *dent;
154 int i;
155 };
156
157 struct mlx5_rsc_debug {
158 struct mlx5_core_dev *dev;
159 void *object;
160 enum dbg_rsc_type type;
161 struct dentry *root;
162 struct mlx5_field_desc fields[0];
163 };
164
165 enum mlx5_dev_event {
166 MLX5_DEV_EVENT_SYS_ERROR,
167 MLX5_DEV_EVENT_PORT_UP,
168 MLX5_DEV_EVENT_PORT_DOWN,
169 MLX5_DEV_EVENT_PORT_INITIALIZED,
170 MLX5_DEV_EVENT_LID_CHANGE,
171 MLX5_DEV_EVENT_PKEY_CHANGE,
172 MLX5_DEV_EVENT_GUID_CHANGE,
173 MLX5_DEV_EVENT_CLIENT_REREG,
174 };
175
176 enum mlx5_port_status {
177 MLX5_PORT_UP = 1,
178 MLX5_PORT_DOWN = 2,
179 };
180
181 struct mlx5_uuar_info {
182 struct mlx5_uar *uars;
183 int num_uars;
184 int num_low_latency_uuars;
185 unsigned long *bitmap;
186 unsigned int *count;
187 struct mlx5_bf *bfs;
188
189 /*
190 * protect uuar allocation data structs
191 */
192 struct mutex lock;
193 u32 ver;
194 };
195
196 struct mlx5_bf {
197 void __iomem *reg;
198 void __iomem *regreg;
199 int buf_size;
200 struct mlx5_uar *uar;
201 unsigned long offset;
202 int need_lock;
203 /* protect blue flame buffer selection when needed
204 */
205 spinlock_t lock;
206
207 /* serialize 64 bit writes when done as two 32 bit accesses
208 */
209 spinlock_t lock32;
210 int uuarn;
211 };
212
213 struct mlx5_cmd_first {
214 __be32 data[4];
215 };
216
217 struct mlx5_cmd_msg {
218 struct list_head list;
219 struct cmd_msg_cache *parent;
220 u32 len;
221 struct mlx5_cmd_first first;
222 struct mlx5_cmd_mailbox *next;
223 };
224
225 struct mlx5_cmd_debug {
226 struct dentry *dbg_root;
227 struct dentry *dbg_in;
228 struct dentry *dbg_out;
229 struct dentry *dbg_outlen;
230 struct dentry *dbg_status;
231 struct dentry *dbg_run;
232 void *in_msg;
233 void *out_msg;
234 u8 status;
235 u16 inlen;
236 u16 outlen;
237 };
238
239 struct cmd_msg_cache {
240 /* protect block chain allocations
241 */
242 spinlock_t lock;
243 struct list_head head;
244 unsigned int max_inbox_size;
245 unsigned int num_ent;
246 };
247
248 enum {
249 MLX5_NUM_COMMAND_CACHES = 5,
250 };
251
252 struct mlx5_cmd_stats {
253 u64 sum;
254 u64 n;
255 struct dentry *root;
256 struct dentry *avg;
257 struct dentry *count;
258 /* protect command average calculations */
259 spinlock_t lock;
260 };
261
262 struct mlx5_cmd {
263 void *cmd_alloc_buf;
264 dma_addr_t alloc_dma;
265 int alloc_size;
266 void *cmd_buf;
267 dma_addr_t dma;
268 u16 cmdif_rev;
269 u8 log_sz;
270 u8 log_stride;
271 int max_reg_cmds;
272 int events;
273 u32 __iomem *vector;
274
275 /* protect command queue allocations
276 */
277 spinlock_t alloc_lock;
278
279 /* protect token allocations
280 */
281 spinlock_t token_lock;
282 u8 token;
283 unsigned long bitmask;
284 char wq_name[MLX5_CMD_WQ_MAX_NAME];
285 struct workqueue_struct *wq;
286 struct semaphore sem;
287 struct semaphore pages_sem;
288 int mode;
289 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
290 struct pci_pool *pool;
291 struct mlx5_cmd_debug dbg;
292 struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
293 int checksum_disabled;
294 struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
295 };
296
297 struct mlx5_port_caps {
298 int gid_table_len;
299 int pkey_table_len;
300 u8 ext_port_cap;
301 };
302
303 struct mlx5_cmd_mailbox {
304 void *buf;
305 dma_addr_t dma;
306 struct mlx5_cmd_mailbox *next;
307 };
308
309 struct mlx5_buf_list {
310 void *buf;
311 dma_addr_t map;
312 };
313
314 struct mlx5_buf {
315 struct mlx5_buf_list direct;
316 int npages;
317 int size;
318 u8 page_shift;
319 };
320
321 struct mlx5_frag_buf {
322 struct mlx5_buf_list *frags;
323 int npages;
324 int size;
325 u8 page_shift;
326 };
327
328 struct mlx5_eq_tasklet {
329 struct list_head list;
330 struct list_head process_list;
331 struct tasklet_struct task;
332 /* lock on completion tasklet list */
333 spinlock_t lock;
334 };
335
336 struct mlx5_eq {
337 struct mlx5_core_dev *dev;
338 __be32 __iomem *doorbell;
339 u32 cons_index;
340 struct mlx5_buf buf;
341 int size;
342 unsigned int irqn;
343 u8 eqn;
344 int nent;
345 u64 mask;
346 struct list_head list;
347 int index;
348 struct mlx5_rsc_debug *dbg;
349 struct mlx5_eq_tasklet tasklet_ctx;
350 };
351
352 struct mlx5_core_psv {
353 u32 psv_idx;
354 struct psv_layout {
355 u32 pd;
356 u16 syndrome;
357 u16 reserved;
358 u16 bg;
359 u16 app_tag;
360 u32 ref_tag;
361 } psv;
362 };
363
364 struct mlx5_core_sig_ctx {
365 struct mlx5_core_psv psv_memory;
366 struct mlx5_core_psv psv_wire;
367 struct ib_sig_err err_item;
368 bool sig_status_checked;
369 bool sig_err_exists;
370 u32 sigerr_count;
371 };
372
373 struct mlx5_core_mkey {
374 u64 iova;
375 u64 size;
376 u32 key;
377 u32 pd;
378 };
379
380 enum mlx5_res_type {
381 MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
382 MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
383 MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
384 MLX5_RES_SRQ = 3,
385 MLX5_RES_XSRQ = 4,
386 };
387
388 struct mlx5_core_rsc_common {
389 enum mlx5_res_type res;
390 atomic_t refcount;
391 struct completion free;
392 };
393
394 struct mlx5_core_srq {
395 struct mlx5_core_rsc_common common; /* must be first */
396 u32 srqn;
397 int max;
398 int max_gs;
399 int max_avail_gather;
400 int wqe_shift;
401 void (*event) (struct mlx5_core_srq *, enum mlx5_event);
402
403 atomic_t refcount;
404 struct completion free;
405 };
406
407 struct mlx5_eq_table {
408 void __iomem *update_ci;
409 void __iomem *update_arm_ci;
410 struct list_head comp_eqs_list;
411 struct mlx5_eq pages_eq;
412 struct mlx5_eq async_eq;
413 struct mlx5_eq cmd_eq;
414 int num_comp_vectors;
415 /* protect EQs list
416 */
417 spinlock_t lock;
418 };
419
420 struct mlx5_uar {
421 u32 index;
422 struct list_head bf_list;
423 unsigned free_bf_bmap;
424 void __iomem *bf_map;
425 void __iomem *map;
426 };
427
428
429 struct mlx5_core_health {
430 struct health_buffer __iomem *health;
431 __be32 __iomem *health_counter;
432 struct timer_list timer;
433 u32 prev;
434 int miss_counter;
435 bool sick;
436 /* wq spinlock to synchronize draining */
437 spinlock_t wq_lock;
438 struct workqueue_struct *wq;
439 unsigned long flags;
440 struct work_struct work;
441 struct delayed_work recover_work;
442 };
443
444 struct mlx5_cq_table {
445 /* protect radix tree
446 */
447 spinlock_t lock;
448 struct radix_tree_root tree;
449 };
450
451 struct mlx5_qp_table {
452 /* protect radix tree
453 */
454 spinlock_t lock;
455 struct radix_tree_root tree;
456 };
457
458 struct mlx5_srq_table {
459 /* protect radix tree
460 */
461 spinlock_t lock;
462 struct radix_tree_root tree;
463 };
464
465 struct mlx5_mkey_table {
466 /* protect radix tree
467 */
468 rwlock_t lock;
469 struct radix_tree_root tree;
470 };
471
472 struct mlx5_vf_context {
473 int enabled;
474 };
475
476 struct mlx5_core_sriov {
477 struct mlx5_vf_context *vfs_ctx;
478 int num_vfs;
479 int enabled_vfs;
480 };
481
482 struct mlx5_irq_info {
483 cpumask_var_t mask;
484 char name[MLX5_MAX_IRQ_NAME];
485 };
486
487 struct mlx5_fc_stats {
488 struct rb_root counters;
489 struct list_head addlist;
490 /* protect addlist add/splice operations */
491 spinlock_t addlist_lock;
492
493 struct workqueue_struct *wq;
494 struct delayed_work work;
495 unsigned long next_query;
496 };
497
498 struct mlx5_eswitch;
499 struct mlx5_lag;
500
501 struct mlx5_rl_entry {
502 u32 rate;
503 u16 index;
504 u16 refcount;
505 };
506
507 struct mlx5_rl_table {
508 /* protect rate limit table */
509 struct mutex rl_lock;
510 u16 max_size;
511 u32 max_rate;
512 u32 min_rate;
513 struct mlx5_rl_entry *rl_entry;
514 };
515
516 enum port_module_event_status_type {
517 MLX5_MODULE_STATUS_PLUGGED = 0x1,
518 MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
519 MLX5_MODULE_STATUS_ERROR = 0x3,
520 MLX5_MODULE_STATUS_NUM = 0x3,
521 };
522
523 enum port_module_event_error_type {
524 MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
525 MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
526 MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
527 MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
528 MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
529 MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
530 MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
531 MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
532 MLX5_MODULE_EVENT_ERROR_UNKNOWN,
533 MLX5_MODULE_EVENT_ERROR_NUM,
534 };
535
536 struct mlx5_port_module_event_stats {
537 u64 status_counters[MLX5_MODULE_STATUS_NUM];
538 u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
539 };
540
541 struct mlx5_priv {
542 char name[MLX5_MAX_NAME_LEN];
543 struct mlx5_eq_table eq_table;
544 struct msix_entry *msix_arr;
545 struct mlx5_irq_info *irq_info;
546 struct mlx5_uuar_info uuari;
547 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
548
549 /* pages stuff */
550 struct workqueue_struct *pg_wq;
551 struct rb_root page_root;
552 int fw_pages;
553 atomic_t reg_pages;
554 struct list_head free_list;
555 int vfs_pages;
556
557 struct mlx5_core_health health;
558
559 struct mlx5_srq_table srq_table;
560
561	/* start: qp stuff */
562 struct mlx5_qp_table qp_table;
563 struct dentry *qp_debugfs;
564 struct dentry *eq_debugfs;
565 struct dentry *cq_debugfs;
566 struct dentry *cmdif_debugfs;
567	/* end: qp stuff */
568
569	/* start: cq stuff */
570 struct mlx5_cq_table cq_table;
571	/* end: cq stuff */
572
573	/* start: mkey stuff */
574 struct mlx5_mkey_table mkey_table;
575	/* end: mkey stuff */
576
577	/* start: alloc stuff */
578	/* protect buffer allocation according to numa node */
579 struct mutex alloc_mutex;
580 int numa_node;
581
582 struct mutex pgdir_mutex;
583 struct list_head pgdir_list;
584	/* end: alloc stuff */
585 struct dentry *dbg_root;
586
587 /* protect mkey key part */
588 spinlock_t mkey_lock;
589 u8 mkey_key;
590
591 struct list_head dev_list;
592 struct list_head ctx_list;
593 spinlock_t ctx_lock;
594
595 struct mlx5_flow_steering *steering;
596 struct mlx5_eswitch *eswitch;
597 struct mlx5_core_sriov sriov;
598 struct mlx5_lag *lag;
599 unsigned long pci_dev_data;
600 struct mlx5_fc_stats fc_stats;
601 struct mlx5_rl_table rl_table;
602
603 struct mlx5_port_module_event_stats pme_stats;
604 };
605
606 enum mlx5_device_state {
607 MLX5_DEVICE_STATE_UP,
608 MLX5_DEVICE_STATE_INTERNAL_ERROR,
609 };
610
611 enum mlx5_interface_state {
612 MLX5_INTERFACE_STATE_DOWN = BIT(0),
613 MLX5_INTERFACE_STATE_UP = BIT(1),
614 MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
615 };
616
617 enum mlx5_pci_status {
618 MLX5_PCI_STATUS_DISABLED,
619 MLX5_PCI_STATUS_ENABLED,
620 };
621
622 struct mlx5_td {
623 struct list_head tirs_list;
624 u32 tdn;
625 };
626
627 struct mlx5e_resources {
628 struct mlx5_uar cq_uar;
629 u32 pdn;
630 struct mlx5_td td;
631 struct mlx5_core_mkey mkey;
632 };
633
634 struct mlx5_core_dev {
635 struct pci_dev *pdev;
636 /* sync pci state */
637 struct mutex pci_status_mutex;
638 enum mlx5_pci_status pci_status;
639 u8 rev_id;
640 char board_id[MLX5_BOARD_ID_LEN];
641 struct mlx5_cmd cmd;
642 struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
643 u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
644 u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
645 phys_addr_t iseg_base;
646 struct mlx5_init_seg __iomem *iseg;
647 enum mlx5_device_state state;
648 /* sync interface state */
649 struct mutex intf_state_mutex;
650 unsigned long intf_state;
651 void (*event) (struct mlx5_core_dev *dev,
652 enum mlx5_dev_event event,
653 unsigned long param);
654 struct mlx5_priv priv;
655 struct mlx5_profile *profile;
656 atomic_t num_qps;
657 u32 issi;
658 struct mlx5e_resources mlx5e_res;
659 #ifdef CONFIG_RFS_ACCEL
660 struct cpu_rmap *rmap;
661 #endif
662 };
663
664 struct mlx5_db {
665 __be32 *db;
666 union {
667 struct mlx5_db_pgdir *pgdir;
668 struct mlx5_ib_user_db_page *user_page;
669 } u;
670 dma_addr_t dma;
671 int index;
672 };
673
674 enum {
675 MLX5_COMP_EQ_SIZE = 1024,
676 };
677
678 enum {
679 MLX5_PTYS_IB = 1 << 0,
680 MLX5_PTYS_EN = 1 << 2,
681 };
682
683 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
684
685 struct mlx5_cmd_work_ent {
686 struct mlx5_cmd_msg *in;
687 struct mlx5_cmd_msg *out;
688 void *uout;
689 int uout_size;
690 mlx5_cmd_cbk_t callback;
691 struct delayed_work cb_timeout_work;
692 void *context;
693 int idx;
694 struct completion done;
695 struct mlx5_cmd *cmd;
696 struct work_struct work;
697 struct mlx5_cmd_layout *lay;
698 int ret;
699 int page_queue;
700 u8 status;
701 u8 token;
702 u64 ts1;
703 u64 ts2;
704 u16 op;
705 };
706
707 struct mlx5_pas {
708 u64 pa;
709 u8 log_sz;
710 };
711
712 enum port_state_policy {
713 MLX5_POLICY_DOWN = 0,
714 MLX5_POLICY_UP = 1,
715 MLX5_POLICY_FOLLOW = 2,
716 MLX5_POLICY_INVALID = 0xffffffff
717 };
718
719 enum phy_port_state {
720 MLX5_AAA_111
721 };
722
723 struct mlx5_hca_vport_context {
724 u32 field_select;
725 bool sm_virt_aware;
726 bool has_smi;
727 bool has_raw;
728 enum port_state_policy policy;
729 enum phy_port_state phys_state;
730 enum ib_port_state vport_state;
731 u8 port_physical_state;
732 u64 sys_image_guid;
733 u64 port_guid;
734 u64 node_guid;
735 u32 cap_mask1;
736 u32 cap_mask1_perm;
737 u32 cap_mask2;
738 u32 cap_mask2_perm;
739 u16 lid;
740 u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
741 u8 lmc;
742 u8 subnet_timeout;
743 u16 sm_lid;
744 u8 sm_sl;
745 u16 qkey_violation_counter;
746 u16 pkey_violation_counter;
747 bool grh_required;
748 };
749
750 static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
751 {
752 return buf->direct.buf + offset;
753 }
754
755 extern struct workqueue_struct *mlx5_core_wq;
756
757 #define STRUCT_FIELD(header, field) \
758 .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
759 .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
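
/*
 * Illustrative note (not part of the upstream header): STRUCT_FIELD() is a
 * convenience for building "struct ib_field" packing tables (rdma/ib_pack.h),
 * filling in the byte offset and size of one member of an ib_unpacked_*
 * structure. A hypothetical table entry could look like:
 *
 *	{ STRUCT_FIELD(lrh, virtual_lane),
 *	  .offset_words = 0,
 *	  .offset_bits  = 0,
 *	  .size_bits    = 4 },
 */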
760
761 static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
762 {
763 return pci_get_drvdata(pdev);
764 }
765
766 extern struct dentry *mlx5_debugfs_root;
767
768 static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
769 {
770 return ioread32be(&dev->iseg->fw_rev) & 0xffff;
771 }
772
773 static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
774 {
775 return ioread32be(&dev->iseg->fw_rev) >> 16;
776 }
777
778 static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
779 {
780 return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
781 }
782
783 static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
784 {
785 return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
786 }
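
/*
 * Illustrative sketch (not part of the upstream header): the four helpers
 * above unpack the two 32-bit words of the HCA initialization segment into
 * the firmware revision and the command interface revision. A caller would
 * typically report the firmware as major.minor.subminor, e.g.:
 *
 *	dev_info(&dev->pdev->dev, "firmware version: %d.%d.%d\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */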
787
788 static inline void *mlx5_vzalloc(unsigned long size)
789 {
790 void *rtn;
791
792 rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
793 if (!rtn)
794 rtn = vzalloc(size);
795 return rtn;
796 }
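
/*
 * Usage note (an illustrative sketch, not part of the upstream header):
 * mlx5_vzalloc() tries a physically contiguous kzalloc() first and quietly
 * falls back to vzalloc() for large requests, so the buffer must be released
 * with kvfree(), which handles both cases:
 *
 *	void *in = mlx5_vzalloc(inlen);
 *
 *	if (!in)
 *		return -ENOMEM;
 *	...
 *	kvfree(in);
 */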
797
798 static inline u32 mlx5_base_mkey(const u32 key)
799 {
800 return key & 0xffffff00u;
801 }
802
803 int mlx5_cmd_init(struct mlx5_core_dev *dev);
804 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
805 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
806 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
807
808 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
809 int out_size);
810 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
811 void *out, int out_size, mlx5_cmd_cbk_t callback,
812 void *context);
813 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
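
/*
 * Illustrative sketch (not part of the upstream header): commands are built
 * with the MLX5_ST_SZ_DW()/MLX5_SET() helpers from mlx5/device.h and issued
 * through mlx5_cmd_exec(); ENABLE_HCA is used here only as an example opcode:
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */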
814
815 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
816 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
817 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
818 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
819 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
820 int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
821 bool map_wc);
822 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
823 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
824 int mlx5_health_init(struct mlx5_core_dev *dev);
825 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
826 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
827 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
828 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
829 struct mlx5_buf *buf, int node);
830 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
831 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
832 int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
833 struct mlx5_frag_buf *buf, int node);
834 void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
835 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
836 gfp_t flags, int npages);
837 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
838 struct mlx5_cmd_mailbox *head);
839 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
840 struct mlx5_srq_attr *in);
841 int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
842 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
843 struct mlx5_srq_attr *out);
844 int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
845 u16 lwm, int is_srq);
846 void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
847 void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
848 int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
849 struct mlx5_core_mkey *mkey,
850 u32 *in, int inlen,
851 u32 *out, int outlen,
852 mlx5_cmd_cbk_t callback, void *context);
853 int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
854 struct mlx5_core_mkey *mkey,
855 u32 *in, int inlen);
856 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
857 struct mlx5_core_mkey *mkey);
858 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
859 u32 *out, int outlen);
860 int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
861 u32 *mkey);
862 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
863 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
864 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
865 u16 opmod, u8 port);
866 void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
867 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
868 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
869 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
870 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
871 s32 npages);
872 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
873 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
874 void mlx5_register_debugfs(void);
875 void mlx5_unregister_debugfs(void);
876 int mlx5_eq_init(struct mlx5_core_dev *dev);
877 void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
878 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
879 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
880 void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
881 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
882 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
883 void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
884 #endif
885 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
886 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
887 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
888 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
889 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
890 int nent, u64 mask, const char *name, struct mlx5_uar *uar);
891 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
892 int mlx5_start_eqs(struct mlx5_core_dev *dev);
893 int mlx5_stop_eqs(struct mlx5_core_dev *dev);
894 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
895 unsigned int *irqn);
896 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
897 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
898
899 int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
900 void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
901 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
902 int size_in, void *data_out, int size_out,
903 u16 reg_num, int arg, int write);
904
905 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
906 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
907 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
908 u32 *out, int outlen);
909 int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
910 void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
911 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
912 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
913 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
914 int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
915 int node);
916 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
917
918 const char *mlx5_command_str(int command);
919 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
920 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
921 int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
922 int npsvs, u32 *sig_index);
923 int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
924 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
925 int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
926 struct mlx5_odp_caps *odp_caps);
927 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
928 u8 port_num, void *out, size_t sz);
929
930 int mlx5_init_rl_table(struct mlx5_core_dev *dev);
931 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
932 int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
933 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
934 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
935
936 static inline int fw_initializing(struct mlx5_core_dev *dev)
937 {
938 return ioread32be(&dev->iseg->initializing) >> 31;
939 }
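
/*
 * Illustrative sketch (not part of the upstream header): fw_initializing()
 * reads the "initializing" bit of the init segment; during probe the driver
 * polls it until firmware clears the bit or a timeout expires, roughly:
 *
 *	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);
 *
 *	while (fw_initializing(dev)) {
 *		if (time_after(jiffies, end))
 *			return -EBUSY;
 *		msleep(20);
 *	}
 *
 * (timeout_ms and the 20ms poll interval are placeholders, not values taken
 * from this header.)
 */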
940
941 static inline u32 mlx5_mkey_to_idx(u32 mkey)
942 {
943 return mkey >> 8;
944 }
945
946 static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
947 {
948 return mkey_idx << 8;
949 }
950
951 static inline u8 mlx5_mkey_variant(u32 mkey)
952 {
953 return mkey & 0xff;
954 }
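
/*
 * Worked example (editorial note, not part of the upstream header): a memory
 * key is a 24-bit index in the upper bits plus an 8-bit "variant" in the low
 * byte. For a hypothetical key 0x00123456:
 *
 *	mlx5_mkey_to_idx(0x00123456)  == 0x001234
 *	mlx5_mkey_variant(0x00123456) == 0x56
 *	mlx5_base_mkey(0x00123456)    == 0x00123400
 *	mlx5_idx_to_mkey(0x001234)    == 0x00123400
 */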
955
956 enum {
957 MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
958 MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
959 };
960
961 enum {
962 MAX_MR_CACHE_ENTRIES = 16,
963 };
964
965 enum {
966 MLX5_INTERFACE_PROTOCOL_IB = 0,
967 MLX5_INTERFACE_PROTOCOL_ETH = 1,
968 };
969
970 struct mlx5_interface {
971 void * (*add)(struct mlx5_core_dev *dev);
972 void (*remove)(struct mlx5_core_dev *dev, void *context);
973 int (*attach)(struct mlx5_core_dev *dev, void *context);
974 void (*detach)(struct mlx5_core_dev *dev, void *context);
975 void (*event)(struct mlx5_core_dev *dev, void *context,
976 enum mlx5_dev_event event, unsigned long param);
977 void * (*get_dev)(void *context);
978 int protocol;
979 struct list_head list;
980 };
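
/*
 * Illustrative sketch (not part of the upstream header): protocol drivers
 * (e.g. the Ethernet and IB ULPs) hook into the core by filling in a
 * struct mlx5_interface and registering it with the helpers declared below;
 * the names used here are hypothetical:
 *
 *	static void *example_add(struct mlx5_core_dev *dev)
 *	{
 *		return example_alloc_context(dev);
 *	}
 *
 *	static void example_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *		example_free_context(context);
 *	}
 *
 *	static struct mlx5_interface example_intf = {
 *		.add      = example_add,
 *		.remove   = example_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 * mlx5_register_interface(&example_intf) is then called from module init and
 * mlx5_unregister_interface(&example_intf) from module exit.
 */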
981
982 void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
983 int mlx5_register_interface(struct mlx5_interface *intf);
984 void mlx5_unregister_interface(struct mlx5_interface *intf);
985 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
986
987 int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
988 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
989 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
990 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
991
992 struct mlx5_profile {
993 u64 mask;
994 u8 log_max_qp;
995 struct {
996 int size;
997 int limit;
998 } mr_cache[MAX_MR_CACHE_ENTRIES];
999 };
1000
1001 enum {
1002 MLX5_PCI_DEV_IS_VF = 1 << 0,
1003 };
1004
1005 static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
1006 {
1007 return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
1008 }
1009
1010 static inline int mlx5_get_gid_table_len(u16 param)
1011 {
1012 if (param > 4) {
1013 pr_warn("gid table length is zero\n");
1014 return 0;
1015 }
1016
1017 return 8 * (1 << param);
1018 }
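
/*
 * Worked example (editorial note, not part of the upstream header): the
 * table length is 8 * 2^param, so param 0 -> 8 GIDs up to param 4 -> 128
 * GIDs; anything larger is treated as invalid and reported as a zero-length
 * table.
 */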
1019
1020 static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
1021 {
1022 return !!(dev->priv.rl_table.max_size);
1023 }
1024
1025 enum {
1026 MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
1027 };
1028
1029 #endif /* MLX5_DRIVER_H */