/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};
enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};
enum {
	CMD_STATUS_SUCCESS	= 0,
};
enum mlx5_sqp_t {
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SYNC_UMR	= 4,
};
enum {
	MLX5_EQ_VEC_PAGES	= 0,
	MLX5_EQ_VEC_ASYNC	= 2,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_IRQ_NAME	= 32
};
enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};
enum {
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_DCBX_PARAM	 = 0x4020,
	MLX5_REG_DCBX_APP	 = 0x4021,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MLCR		 = 0x902b,
};
enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST	= 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO	= 0x3,
};
enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
};
enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};
struct mlx5_field_desc {
	/* ... */
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	enum dbg_rsc_type	type;
	struct mlx5_field_desc	fields[0];
};
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};
enum mlx5_port_status {
	/* ... */
};
struct mlx5_uuar_info {
	struct mlx5_uar	       *uars;
	int			num_low_latency_uuars;
	unsigned long	       *bitmap;
	/* ... */

	/* protect uuar allocation data structs */
	/* ... */
};
struct mlx5_bf {
	void __iomem	       *regreg;
	struct mlx5_uar	       *uar;
	unsigned long		offset;
	/* protect blue flame buffer selection when needed */
	/* ... */
	/* serialize 64 bit writes when done as two 32 bit accesses */
	/* ... */
};
struct mlx5_cmd_first {
	/* ... */
};

struct mlx5_cmd_msg {
	struct list_head		list;
	struct cmd_msg_cache	       *parent;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
	/* ... */
};
struct cmd_msg_cache {
	/* protect block chain allocations */
	/* ... */
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
	/* ... */
	struct dentry	       *count;
	/* protect command average calculations */
	/* ... */
};
struct mlx5_cmd {
	dma_addr_t	alloc_dma;
	/* ... */
	/* protect command queue allocations */
	spinlock_t	alloc_lock;

	/* protect token allocations */
	spinlock_t	token_lock;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int		checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
	/* ... */
};
struct mlx5_cmd_mailbox {
	/* ... */
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_buf_list {
	/* ... */
};

struct mlx5_buf {
	struct mlx5_buf_list	direct;
	/* ... */
};
struct mlx5_frag_buf {
	struct mlx5_buf_list   *frags;
	/* ... */
};
struct mlx5_eq_tasklet {
	struct list_head	list;
	struct list_head	process_list;
	struct tasklet_struct	task;
	/* lock on completion tasklet list */
	/* ... */
};
struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	/* ... */
	struct list_head	list;
	struct mlx5_rsc_debug  *dbg;
	struct mlx5_eq_tasklet	tasklet_ctx;
};
struct mlx5_core_psv {
	/* ... */
};
struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
	/* ... */
};
struct mlx5_core_mkey {
	/* ... */
};
enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	/* ... */
};
struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	struct completion	free;
};
struct mlx5_core_srq {
	struct mlx5_core_rsc_common	common; /* must be first */
	/* ... */
	int		max_avail_gather;
	void		(*event)(struct mlx5_core_srq *, enum mlx5_event);
	/* ... */
	struct completion	free;
};
struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head	comp_eqs_list;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
	int			num_comp_vectors;
	/* ... */
};
struct mlx5_uar {
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *bf_map;
	/* ... */
};
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	/* ... */
	/* wq spinlock to synchronize draining */
	/* ... */
	struct workqueue_struct	       *wq;
	struct work_struct		work;
	struct delayed_work		recover_work;
};
struct mlx5_cq_table {
	/* protect radix tree */
	/* ... */
	struct radix_tree_root	tree;
};

struct mlx5_qp_table {
	/* protect radix tree */
	/* ... */
	struct radix_tree_root	tree;
};

struct mlx5_srq_table {
	/* protect radix tree */
	/* ... */
	struct radix_tree_root	tree;
};

struct mlx5_mkey_table {
	/* protect radix tree */
	/* ... */
	struct radix_tree_root	tree;
};
struct mlx5_vf_context {
	/* ... */
};

struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;
	/* ... */
};
struct mlx5_irq_info {
	/* ... */
	char name[MLX5_MAX_IRQ_NAME];
};
struct mlx5_fc_stats {
	struct rb_root		counters;
	struct list_head	addlist;
	/* protect addlist add/splice operations */
	spinlock_t		addlist_lock;

	struct workqueue_struct *wq;
	struct delayed_work	work;
	unsigned long		next_query;
};
struct mlx5_rl_entry {
	/* ... */
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex		rl_lock;
	/* ... */
	struct mlx5_rl_entry   *rl_entry;
};
enum port_module_event_status_type {
	MLX5_MODULE_STATUS_PLUGGED	= 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED	= 0x2,
	MLX5_MODULE_STATUS_ERROR	= 0x3,
	MLX5_MODULE_STATUS_NUM		= 0x3,
};

enum port_module_event_error_type {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
	MLX5_MODULE_EVENT_ERROR_NUM,
};
struct mlx5_port_module_event_stats {
	u64 status_counters[MLX5_MODULE_STATUS_NUM];
	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct msix_entry	*msix_arr;
	struct mlx5_irq_info	*irq_info;
	struct mlx5_uuar_info	uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	struct list_head	free_list;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;

	/* start: cq stuff */
	struct mlx5_cq_table	cq_table;

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;

	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;

	struct list_head	dev_list;
	struct list_head	ctx_list;

	struct mlx5_flow_steering *steering;
	struct mlx5_eswitch	*eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag		*lag;
	unsigned long		pci_dev_data;
	struct mlx5_fc_stats	fc_stats;
	struct mlx5_rl_table	rl_table;

	struct mlx5_port_module_event_stats  pme_stats;
};
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_DOWN	= BIT(0),
	MLX5_INTERFACE_STATE_UP		= BIT(1),
	MLX5_INTERFACE_STATE_SHUTDOWN	= BIT(2),
};
enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};
struct mlx5_td {
	struct list_head	tirs_list;
	/* ... */
};
struct mlx5e_resources {
	struct mlx5_uar		cq_uar;
	/* ... */
	struct mlx5_core_mkey	mkey;
};
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  unsigned long param);
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	struct mlx5e_resources	mlx5e_res;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap	       *rmap;
#endif
};
struct mlx5_db {
	/* ... */
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	} u;
	/* ... */
};
enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	/* ... */
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	/* ... */
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	/* ... */
};
enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};
enum phy_port_state {
	/* ... */
};
struct mlx5_hca_vport_context {
	/* ... */
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	/* ... */
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	/* ... */
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	return buf->direct.buf + offset;
}
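/*
 * Illustrative usage only (not part of the original header): callers
 * typically use mlx5_buf_offset() to address one entry inside a contiguous
 * queue buffer; "i" and "stride" below are placeholder names.
 *
 *	void *entry = mlx5_buf_offset(&buf, i * stride);
 */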
extern struct workqueue_struct *mlx5_core_wq;
#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
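/*
 * Illustrative usage only (not part of the original header): a caller that
 * already holds a probed struct mlx5_core_dev could report the firmware and
 * command-interface revisions with the helpers above, e.g.:
 *
 *	dev_info(&dev->pdev->dev, "firmware %d.%d.%d, cmdif rev %d\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev),
 *		 cmdif_rev(dev));
 */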
static inline void *mlx5_vzalloc(unsigned long size)
{
	void *rtn;

	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!rtn)
		rtn = vzalloc(size);
	return rtn;
}
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
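/*
 * Illustrative usage only (not part of the original header): commands are
 * normally built with the MLX5_ST_SZ_DW()/MLX5_SET() helpers that come in
 * via linux/mlx5/device.h and are executed with mlx5_cmd_exec(). A minimal
 * sketch issuing an ENABLE_HCA command might look like:
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */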
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
		       bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
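/*
 * Illustrative usage only (not part of the original header):
 * mlx5_buf_alloc() and mlx5_fill_page_array() are typically paired when a
 * queue object is created; "pas" stands for the physical-address array
 * inside the creation command and is a placeholder name here.
 *
 *	struct mlx5_buf buf;
 *	int err;
 *
 *	err = mlx5_buf_alloc(dev, size, &buf);
 *	if (!err) {
 *		mlx5_fill_page_array(&buf, pas);
 *		// ... pass the command to firmware ...
 *		mlx5_buf_free(dev, &buf);
 *	}
 */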
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
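/*
 * Illustrative usage only (not part of the original header): doorbell
 * records are obtained and released with mlx5_db_alloc()/mlx5_db_free().
 *
 *	struct mlx5_db db;
 *	int err;
 *
 *	err = mlx5_db_alloc(dev, &db);
 *	if (!err) {
 *		// ... hand the record to the queue object ...
 *		mlx5_db_free(dev, &db);
 *	}
 */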
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
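/*
 * Illustrative note (not part of the original header): given the helpers
 * above, an mkey decomposes into a 24-bit index in the upper bits and an
 * 8-bit variant in the lowest byte, so for any mkey value:
 *
 *	mkey == (mlx5_idx_to_mkey(mlx5_mkey_to_idx(mkey)) | mlx5_mkey_variant(mkey))
 */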
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MAX_MR_CACHE_ENTRIES	= 16,
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	void			(*event)(struct mlx5_core_dev *dev, void *context,
					 enum mlx5_dev_event event, unsigned long param);
	void *			(*get_dev)(void *context);
	struct list_head	list;
};
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
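/*
 * Illustrative usage only (not part of the original header): a sub-driver
 * plugs into the core by registering a struct mlx5_interface; every name
 * prefixed "my_" below is a placeholder.
 *
 *	static void *my_add(struct mlx5_core_dev *dev)
 *	{
 *		return my_alloc_state(dev);	// returned pointer becomes "context"
 *	}
 *
 *	static void my_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *		my_free_state(context);
 *	}
 *
 *	static struct mlx5_interface my_intf = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	// on module init/exit:
 *	//	mlx5_register_interface(&my_intf);
 *	//	mlx5_unregister_interface(&my_intf);
 */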
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_profile {
	/* ... */
	struct {
		/* ... */
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};
enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}
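/*
 * Illustrative usage only (not part of the original header): a plausible
 * calling sequence for the rate-limit API declared earlier; "rate" is a
 * placeholder value within the device's supported range and "index"
 * receives the hardware rate-limit table entry.
 *
 *	u16 index;
 *
 *	if (mlx5_rl_is_supported(dev) && mlx5_rl_is_in_range(dev, rate) &&
 *	    !mlx5_rl_add_rate(dev, rate, &index)) {
 *		// ... use "index" in the queue context ...
 *		mlx5_rl_remove_rate(dev, rate);
 *	}
 */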
enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

#endif /* MLX5_DRIVER_H */
1028 #endif /* MLX5_DRIVER_H */