/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
enum {
        MLX5_BOARD_ID_LEN = 64,
        MLX5_MAX_NAME_LEN = 16,
};
enum {
        /* one minute for the sake of bringup. Generally, commands must always
         * complete and we may need to increase this timeout value
         */
        MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
        MLX5_CMD_WQ_MAX_NAME  = 32,
};
enum {
        CMD_STATUS_SUCCESS = 0,
};
enum {
        MLX5_SQP_IEEE_1588 = 2,
        MLX5_SQP_SYNC_UMR  = 4,
};
enum {
        MLX5_EQ_VEC_PAGES     = 0,
        MLX5_EQ_VEC_ASYNC     = 2,
        MLX5_EQ_VEC_PFAULT    = 3,
        MLX5_EQ_VEC_COMP_BASE,
};
enum {
        MLX5_MAX_IRQ_NAME = 32
};
enum {
        MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
        MLX5_ATOMIC_MODE_CX      = 2 << 16,
        MLX5_ATOMIC_MODE_8B      = 3 << 16,
        MLX5_ATOMIC_MODE_16B     = 4 << 16,
        MLX5_ATOMIC_MODE_32B     = 5 << 16,
        MLX5_ATOMIC_MODE_64B     = 6 << 16,
        MLX5_ATOMIC_MODE_128B    = 7 << 16,
        MLX5_ATOMIC_MODE_256B    = 8 << 16,
};
enum {
        MLX5_REG_QETCR           = 0x4005,
        MLX5_REG_QTCT            = 0x400a,
        MLX5_REG_DCBX_PARAM      = 0x4020,
        MLX5_REG_DCBX_APP        = 0x4021,
        MLX5_REG_PCAP            = 0x5001,
        MLX5_REG_PMTU            = 0x5003,
        MLX5_REG_PTYS            = 0x5004,
        MLX5_REG_PAOS            = 0x5006,
        MLX5_REG_PFCC            = 0x5007,
        MLX5_REG_PPCNT           = 0x5008,
        MLX5_REG_PMAOS           = 0x5012,
        MLX5_REG_PUDE            = 0x5009,
        MLX5_REG_PMPE            = 0x5010,
        MLX5_REG_PELC            = 0x500e,
        MLX5_REG_PVLC            = 0x500f,
        MLX5_REG_PCMR            = 0x5041,
        MLX5_REG_PMLP            = 0x5002,
        MLX5_REG_NODE_DESC       = 0x6001,
        MLX5_REG_HOST_ENDIANNESS = 0x7004,
        MLX5_REG_MCIA            = 0x9014,
        MLX5_REG_MLCR            = 0x902b,
};
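/*
 * Example (illustrative sketch, not part of the original header): the
 * MLX5_REG_* IDs above are passed as the reg_num argument of
 * mlx5_core_access_reg(), declared later in this file. Reading the PMTU
 * register could look roughly like this (the "pmtu_reg" layout and its
 * local_port field come from mlx5_ifc.h and are assumptions here):
 *
 *      u32 in[MLX5_ST_SZ_DW(pmtu_reg)]  = {0};
 *      u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
 *      int err;
 *
 *      MLX5_SET(pmtu_reg, in, local_port, 1);
 *      err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *                                 MLX5_REG_PMTU, 0, 0);
 */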
enum mlx5_dcbx_oper_mode {
        MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
        MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
enum {
        MLX5_ATOMIC_OPS_CMP_SWAP  = 1 << 0,
        MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
};
enum mlx5_page_fault_resume_flags {
        MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
        MLX5_PAGE_FAULT_RESUME_WRITE     = 1 << 1,
        MLX5_PAGE_FAULT_RESUME_RDMA      = 1 << 2,
        MLX5_PAGE_FAULT_RESUME_ERROR     = 1 << 7,
};
struct mlx5_field_desc {
        struct dentry          *dent;
        int                     i;
};

struct mlx5_rsc_debug {
        struct mlx5_core_dev   *dev;
        enum dbg_rsc_type       type;
        struct mlx5_field_desc  fields[0];
};
enum mlx5_dev_event {
        MLX5_DEV_EVENT_SYS_ERROR,
        MLX5_DEV_EVENT_PORT_UP,
        MLX5_DEV_EVENT_PORT_DOWN,
        MLX5_DEV_EVENT_PORT_INITIALIZED,
        MLX5_DEV_EVENT_LID_CHANGE,
        MLX5_DEV_EVENT_PKEY_CHANGE,
        MLX5_DEV_EVENT_GUID_CHANGE,
        MLX5_DEV_EVENT_CLIENT_REREG,
};
enum mlx5_port_status {
        MLX5_PORT_UP   = 1,
        MLX5_PORT_DOWN = 2,
};

enum mlx5_eq_type {
        MLX5_EQ_TYPE_COMP,
        MLX5_EQ_TYPE_ASYNC,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        MLX5_EQ_TYPE_PF,
#endif
};
struct mlx5_bfreg_info {
        int                     num_low_latency_bfregs;

        /* protect bfreg allocation data structs */
};
struct mlx5_cmd_first {
        __be32                  data[4];
};

struct mlx5_cmd_msg {
        struct list_head        list;
        struct cmd_msg_cache   *parent;
        struct mlx5_cmd_first   first;
        struct mlx5_cmd_mailbox *next;
};
struct mlx5_cmd_debug {
        struct dentry          *dbg_root;
        struct dentry          *dbg_in;
        struct dentry          *dbg_out;
        struct dentry          *dbg_outlen;
        struct dentry          *dbg_status;
        struct dentry          *dbg_run;
};
struct cmd_msg_cache {
        /* protect block chain allocations */
        struct list_head        head;
        unsigned int            max_inbox_size;
        unsigned int            num_ent;
};

enum {
        MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
        struct dentry          *count;
        /* protect command average calculations */
};
struct mlx5_cmd {
        dma_addr_t      alloc_dma;

        /* protect command queue allocations */
        spinlock_t      alloc_lock;

        /* protect token allocations */
        spinlock_t      token_lock;

        unsigned long   bitmask;
        char            wq_name[MLX5_CMD_WQ_MAX_NAME];
        struct workqueue_struct *wq;
        struct semaphore sem;
        struct semaphore pages_sem;
        struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
        struct pci_pool *pool;
        struct mlx5_cmd_debug dbg;
        struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
        int             checksum_disabled;
        struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
        int     gid_table_len;
        int     pkey_table_len;
        u8      ext_port_cap;
};

struct mlx5_cmd_mailbox {
        void                   *buf;
        dma_addr_t              dma;
        struct mlx5_cmd_mailbox *next;
};
struct mlx5_buf_list {
        void           *buf;
        dma_addr_t      map;
};

struct mlx5_buf {
        struct mlx5_buf_list    direct;
};

struct mlx5_frag_buf {
        struct mlx5_buf_list   *frags;
};
struct mlx5_eq_tasklet {
        struct list_head        list;
        struct list_head        process_list;
        struct tasklet_struct   task;
        /* lock on completion tasklet list */
};

struct mlx5_eq_pagefault {
        struct work_struct      work;
        /* Pagefaults lock */
        struct workqueue_struct *wq;
};
struct mlx5_eq {
        struct mlx5_core_dev   *dev;
        __be32 __iomem         *doorbell;
        struct list_head        list;
        struct mlx5_rsc_debug  *dbg;
        enum mlx5_eq_type       type;
        struct mlx5_eq_tasklet  tasklet_ctx;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct mlx5_eq_pagefault pf_ctx;
#endif
};
struct mlx5_core_psv {
        u32     psv_idx;
};

struct mlx5_core_sig_ctx {
        struct mlx5_core_psv    psv_memory;
        struct mlx5_core_psv    psv_wire;
        struct ib_sig_err       err_item;
        bool                    sig_status_checked;
};

struct mlx5_core_mkey {
        u64     iova;
        u64     size;
        u32     key;
        u32     pd;
};

#define MLX5_24BIT_MASK         ((1 << 24) - 1)
enum mlx5_res_type {
        MLX5_RES_QP     = MLX5_EVENT_QUEUE_TYPE_QP,
        MLX5_RES_RQ     = MLX5_EVENT_QUEUE_TYPE_RQ,
        MLX5_RES_SQ     = MLX5_EVENT_QUEUE_TYPE_SQ,
};

struct mlx5_core_rsc_common {
        enum mlx5_res_type      res;
        atomic_t                refcount;
        struct completion       free;
};
struct mlx5_core_srq {
        struct mlx5_core_rsc_common     common; /* must be first */
        int             max_avail_gather;
        void (*event)   (struct mlx5_core_srq *, enum mlx5_event);
        struct completion       free;
};
struct mlx5_eq_table {
        void __iomem           *update_ci;
        void __iomem           *update_arm_ci;
        struct list_head        comp_eqs_list;
        struct mlx5_eq          pages_eq;
        struct mlx5_eq          async_eq;
        struct mlx5_eq          cmd_eq;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct mlx5_eq          pfault_eq;
#endif
        int                     num_comp_vectors;
};
struct mlx5_uars_page {
        struct list_head        list;
        unsigned long          *reg_bitmap; /* for non fast path bf regs */
        unsigned long          *fp_bitmap;
        unsigned int            reg_avail;
        unsigned int            fp_avail;
        struct kref             ref_count;
        struct mlx5_core_dev   *mdev;
};
struct mlx5_bfreg_head {
        /* protect blue flame registers allocations */
        struct list_head        list;
};

struct mlx5_bfreg_data {
        struct mlx5_bfreg_head  reg_head;
        struct mlx5_bfreg_head  wc_head;
};

struct mlx5_sq_bfreg {
        struct mlx5_uars_page  *up;
};
struct mlx5_core_health {
        struct health_buffer __iomem   *health;
        __be32 __iomem                 *health_counter;
        struct timer_list               timer;
        /* wq spinlock to synchronize draining */
        struct workqueue_struct        *wq;
        struct work_struct              work;
        struct delayed_work             recover_work;
};
struct mlx5_cq_table {
        /* protect radix tree */
        struct radix_tree_root  tree;
};

struct mlx5_qp_table {
        /* protect radix tree */
        struct radix_tree_root  tree;
};

struct mlx5_srq_table {
        /* protect radix tree */
        struct radix_tree_root  tree;
};

struct mlx5_mkey_table {
        /* protect radix tree */
        struct radix_tree_root  tree;
};
struct mlx5_vf_context {
        int     enabled;
};

struct mlx5_core_sriov {
        struct mlx5_vf_context *vfs_ctx;
};

struct mlx5_irq_info {
        char    name[MLX5_MAX_IRQ_NAME];
};
struct mlx5_fc_stats {
        struct rb_root          counters;
        struct list_head        addlist;
        /* protect addlist add/splice operations */
        spinlock_t              addlist_lock;

        struct workqueue_struct *wq;
        struct delayed_work     work;
        unsigned long           next_query;
};

struct mlx5_pagefault;
struct mlx5_rl_entry {
        u32     rate;
        u16     index;
        u16     refcount;
};

struct mlx5_rl_table {
        /* protect rate limit table */
        struct mutex            rl_lock;
        u16                     max_size;
        struct mlx5_rl_entry   *rl_entry;
};
enum port_module_event_status_type {
        MLX5_MODULE_STATUS_PLUGGED   = 0x1,
        MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
        MLX5_MODULE_STATUS_ERROR     = 0x3,
        MLX5_MODULE_STATUS_NUM       = 0x3,
};

enum port_module_event_error_type {
        MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
        MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
        MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
        MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
        MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
        MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
        MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
        MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
        MLX5_MODULE_EVENT_ERROR_UNKNOWN,
        MLX5_MODULE_EVENT_ERROR_NUM,
};
struct mlx5_port_module_event_stats {
        u64 status_counters[MLX5_MODULE_STATUS_NUM];
        u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
struct mlx5_priv {
        char                    name[MLX5_MAX_NAME_LEN];
        struct mlx5_eq_table    eq_table;
        struct msix_entry      *msix_arr;
        struct mlx5_irq_info   *irq_info;

        struct workqueue_struct *pg_wq;
        struct rb_root          page_root;
        struct list_head        free_list;

        struct mlx5_core_health health;

        struct mlx5_srq_table   srq_table;

        /* start: qp stuff */
        struct mlx5_qp_table    qp_table;
        struct dentry          *qp_debugfs;
        struct dentry          *eq_debugfs;
        struct dentry          *cq_debugfs;
        struct dentry          *cmdif_debugfs;

        /* start: cq stuff */
        struct mlx5_cq_table    cq_table;

        /* start: mkey stuff */
        struct mlx5_mkey_table  mkey_table;
        /* end: mkey stuff */

        /* start: alloc stuff */
        /* protect buffer allocation according to numa node */
        struct mutex            alloc_mutex;

        struct mutex            pgdir_mutex;
        struct list_head        pgdir_list;
        /* end: alloc stuff */

        struct dentry          *dbg_root;

        /* protect mkey key part */
        spinlock_t              mkey_lock;

        struct list_head        dev_list;
        struct list_head        ctx_list;

        struct mlx5_flow_steering *steering;
        struct mlx5_eswitch    *eswitch;
        struct mlx5_core_sriov  sriov;
        struct mlx5_lag        *lag;
        unsigned long           pci_dev_data;
        struct mlx5_fc_stats    fc_stats;
        struct mlx5_rl_table    rl_table;

        struct mlx5_port_module_event_stats pme_stats;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        void                  (*pfault)(struct mlx5_core_dev *dev,
                                        void *context,
                                        struct mlx5_pagefault *pfault);
        struct srcu_struct      pfault_srcu;
#endif
        struct mlx5_bfreg_data  bfregs;
        struct mlx5_uars_page  *uar;
};
enum mlx5_device_state {
        MLX5_DEVICE_STATE_UP,
        MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
        MLX5_INTERFACE_STATE_DOWN     = BIT(0),
        MLX5_INTERFACE_STATE_UP       = BIT(1),
        MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
};

enum mlx5_pci_status {
        MLX5_PCI_STATUS_DISABLED,
        MLX5_PCI_STATUS_ENABLED,
};
enum mlx5_pagefault_type_flags {
        MLX5_PFAULT_REQUESTOR = 1 << 0,
        MLX5_PFAULT_WRITE     = 1 << 1,
        MLX5_PFAULT_RDMA      = 1 << 2,
};
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
        u8      event_subtype;
        union {
                /* Initiator or send message responder pagefault details. */
                struct {
                        /* Received packet size, only valid for responders. */
                        u32     packet_size;
                        /* Number of resource holding WQE, depends on type. */
                        u32     wq_num;
                        /* WQE index. Refers to either the send queue or
                         * receive queue, according to event_subtype.
                         */
                        u16     wqe_index;
                } wqe;
                /* RDMA responder pagefault details */
                struct {
                        /* Received packet size, minimal size page fault
                         * resolution required for forward progress.
                         */
                        u32     packet_size;
                } rdma;
        };

        struct work_struct      work;
};
struct mlx5_td {
        struct list_head        tirs_list;
};

struct mlx5e_resources {
        struct mlx5_core_mkey   mkey;
};
struct mlx5_core_dev {
        struct pci_dev         *pdev;
        struct mutex            pci_status_mutex;
        enum mlx5_pci_status    pci_status;
        char                    board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_port_caps   port_caps[MLX5_MAX_PORTS];
        u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        enum mlx5_device_state  state;
        /* sync interface state */
        struct mutex            intf_state_mutex;
        unsigned long           intf_state;
        void                    (*event) (struct mlx5_core_dev *dev,
                                          enum mlx5_dev_event event,
                                          unsigned long param);
        struct mlx5_priv        priv;
        struct mlx5_profile     *profile;
        struct mlx5e_resources  mlx5e_res;
#ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap        *rmap;
#endif
};
struct mlx5_db {
        __be32                 *db;
        union {
                struct mlx5_db_pgdir            *pgdir;
                struct mlx5_ib_user_db_page     *user_page;
        }                       u;
        dma_addr_t              dma;
        int                     index;
};
enum {
        MLX5_COMP_EQ_SIZE = 1024,
};

enum {
        MLX5_PTYS_IB = 1 << 0,
        MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
        struct mlx5_cmd_msg    *in;
        struct mlx5_cmd_msg    *out;
        mlx5_cmd_cbk_t          callback;
        struct delayed_work     cb_timeout_work;
        struct completion       done;
        struct mlx5_cmd        *cmd;
        struct work_struct      work;
        struct mlx5_cmd_layout *lay;
};
enum port_state_policy {
        MLX5_POLICY_DOWN        = 0,
        MLX5_POLICY_UP          = 1,
        MLX5_POLICY_FOLLOW      = 2,
        MLX5_POLICY_INVALID     = 0xffffffff
};

enum phy_port_state {
        MLX5_AAA_111
};
struct mlx5_hca_vport_context {
        enum port_state_policy  policy;
        enum phy_port_state     phys_state;
        enum ib_port_state      vport_state;
        u8                      port_physical_state;
        u8                      init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
        u16                     qkey_violation_counter;
        u16                     pkey_violation_counter;
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
        return buf->direct.buf + offset;
}

extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
        .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
        .struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
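/*
 * Example (illustrative, not part of the original header): STRUCT_FIELD() is
 * intended for building "struct ib_field" tables that drive ib_pack() and
 * ib_unpack() over the ib_unpacked_* header structs. A sketch, with the
 * remaining initializers and their values chosen only for illustration:
 *
 *      static const struct ib_field lrh_table[] = {
 *              { STRUCT_FIELD(lrh, virtual_lane),
 *                .offset_words = 0, .offset_bits = 0, .size_bits = 4 },
 *      };
 */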
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
        return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
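/*
 * Example (illustrative sketch, not part of the original header): the three
 * fw_rev_*() accessors above are typically combined into a human readable
 * firmware version string, e.g.:
 *
 *      char fw_ver[32];
 *
 *      snprintf(fw_ver, sizeof(fw_ver), "%d.%d.%04d",
 *               fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */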
static inline void *mlx5_vzalloc(unsigned long size)
{
        void *rtn;

        rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!rtn)
                rtn = vzalloc(size);
        return rtn;
}
static inline u32 mlx5_base_mkey(const u32 key)
{
        return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
                     void *out, int out_size, mlx5_cmd_cbk_t callback,
                     void *context);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
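/*
 * Example (illustrative sketch, not part of the original header): commands are
 * built with the MLX5_ST_SZ_DW()/MLX5_SET() helpers from mlx5/device.h and
 * mlx5_ifc.h and then posted with mlx5_cmd_exec(). Enabling the HCA might look
 * roughly like this:
 *
 *      u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *      u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};
 *      int err;
 *
 *      MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *      err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */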
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                        struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                                      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
                             struct mlx5_core_mkey *mkey,
                             u32 *in, int inlen,
                             u32 *out, int outlen,
                             mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
                          struct mlx5_core_mkey *mkey,
                          u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
                           struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
                         u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
                             u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
                      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name,
                       enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
                         int size_in, void *data_out, int size_out,
                         u16 reg_num, int arg, int write);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
                       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
                         int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
                        struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
                             u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
                                u32 wq_num, u8 type, int error);
#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
                     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
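/*
 * Example (illustrative sketch, not part of the original header): a caller
 * that wants a hardware rate limiter first checks that the rate is inside the
 * supported range of the rate limit table, then takes a reference on an
 * entry; the returned index identifies that entry until the matching
 * mlx5_rl_remove_rate() call drops the reference:
 *
 *      u16 rl_index;
 *      int err;
 *
 *      if (!mlx5_rl_is_in_range(dev, rate))
 *              return -EINVAL;
 *      err = mlx5_rl_add_rate(dev, rate, &rl_index);
 *      ...
 *      mlx5_rl_remove_rate(dev, rate);
 */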
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->initializing) >> 31;
}

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
        return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
        return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
        return mkey & 0xff;
}
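/*
 * Example (illustrative, not part of the original header): together with
 * mlx5_base_mkey() above, these helpers split a 32-bit memory key into a
 * 24-bit index (upper bits) and an 8-bit variant part (lower bits), so that
 * for an index and variant that fit those widths:
 *
 *      u32 mkey = mlx5_idx_to_mkey(idx) | variant;
 *
 *      mlx5_mkey_to_idx(mkey)  == idx;
 *      mlx5_mkey_variant(mkey) == variant;
 *      mlx5_base_mkey(mkey)    == (mkey & 0xffffff00);
 */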
enum {
        MLX5_PROF_MASK_QP_SIZE  = (u64)1 << 0,
        MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
        MAX_UMR_CACHE_ENTRY = 20,
        MLX5_IMR_MTT_CACHE_ENTRY,
        MLX5_IMR_KSM_CACHE_ENTRY,
        MAX_MR_CACHE_ENTRIES
};

enum {
        MLX5_INTERFACE_PROTOCOL_IB  = 0,
        MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
struct mlx5_interface {
        void *                  (*add)(struct mlx5_core_dev *dev);
        void                    (*remove)(struct mlx5_core_dev *dev, void *context);
        int                     (*attach)(struct mlx5_core_dev *dev, void *context);
        void                    (*detach)(struct mlx5_core_dev *dev, void *context);
        void                    (*event)(struct mlx5_core_dev *dev, void *context,
                                         enum mlx5_dev_event event, unsigned long param);
        void                    (*pfault)(struct mlx5_core_dev *dev,
                                          void *context,
                                          struct mlx5_pagefault *pfault);
        void *                  (*get_dev)(void *context);
        int                     protocol;
        struct list_head        list;
};
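/*
 * Example (illustrative sketch, not part of the original header): an upper
 * layer driver announces itself by filling in a struct mlx5_interface and
 * registering it with mlx5_register_interface(), declared below; the core
 * then calls ->add() for every mlx5 device. The callback bodies here are
 * placeholders only:
 *
 *      static void *example_add(struct mlx5_core_dev *dev)
 *      {
 *              return dev;     // context handed back in later callbacks
 *      }
 *
 *      static void example_remove(struct mlx5_core_dev *dev, void *context)
 *      {
 *      }
 *
 *      static struct mlx5_interface example_intf = {
 *              .add      = example_add,
 *              .remove   = example_remove,
 *              .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *      };
 *
 *      mlx5_register_interface(&example_intf);
 */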
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
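/*
 * Example (illustrative sketch, not part of the original header): UAR pages
 * are reference counted, so a user takes a page for the lifetime of the
 * object whose doorbells map into it and releases it afterwards:
 *
 *      struct mlx5_uars_page *up;
 *
 *      up = mlx5_get_uars_page(mdev);
 *      if (!up)
 *              return -ENOMEM; // error handling here is illustrative only
 *      ...
 *      mlx5_put_uars_page(mdev, up);
 */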
struct mlx5_profile {
        u64     mask;
        u8      log_max_qp;
        struct {
                int     size;
                int     limit;
        } mr_cache[MAX_MR_CACHE_ENTRIES];
};
enum {
        MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
        return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
        if (param > 4) {
                pr_warn("gid table length is zero\n");
                return 0;
        }

        return 8 * (1 << param);
}
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
        return !!(dev->priv.rl_table.max_size);
}

enum {
        MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
#endif /* MLX5_DRIVER_H */