/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
enum {
        MLX5_BOARD_ID_LEN = 64,
        MLX5_MAX_NAME_LEN = 16,
};

enum {
        /* one minute for the sake of bringup. Generally, commands must always
         * complete and we may need to increase this timeout value
         */
        MLX5_CMD_TIMEOUT_MSEC   = 60 * 1000,
        MLX5_CMD_WQ_MAX_NAME    = 32,
};
        CMD_STATUS_SUCCESS      = 0,

        MLX5_SQP_IEEE_1588      = 2,
        MLX5_SQP_SYNC_UMR       = 4,
        MLX5_EQ_VEC_PAGES       = 0,
        MLX5_EQ_VEC_ASYNC       = 2,
        MLX5_EQ_VEC_COMP_BASE,

enum {
        MLX5_MAX_IRQ_NAME       = 32
};
enum {
        MLX5_ATOMIC_MODE_IB_COMP        = 1 << 16,
        MLX5_ATOMIC_MODE_CX             = 2 << 16,
        MLX5_ATOMIC_MODE_8B             = 3 << 16,
        MLX5_ATOMIC_MODE_16B            = 4 << 16,
        MLX5_ATOMIC_MODE_32B            = 5 << 16,
        MLX5_ATOMIC_MODE_64B            = 6 << 16,
        MLX5_ATOMIC_MODE_128B           = 7 << 16,
        MLX5_ATOMIC_MODE_256B           = 8 << 16,
};
enum {
        MLX5_REG_QETCR           = 0x4005,
        MLX5_REG_QTCT            = 0x400a,
        MLX5_REG_DCBX_PARAM      = 0x4020,
        MLX5_REG_DCBX_APP        = 0x4021,
        MLX5_REG_PCAP            = 0x5001,
        MLX5_REG_PMTU            = 0x5003,
        MLX5_REG_PTYS            = 0x5004,
        MLX5_REG_PAOS            = 0x5006,
        MLX5_REG_PFCC            = 0x5007,
        MLX5_REG_PPCNT           = 0x5008,
        MLX5_REG_PMAOS           = 0x5012,
        MLX5_REG_PUDE            = 0x5009,
        MLX5_REG_PMPE            = 0x5010,
        MLX5_REG_PELC            = 0x500e,
        MLX5_REG_PVLC            = 0x500f,
        MLX5_REG_PCMR            = 0x5041,
        MLX5_REG_PMLP            = 0x5002,
        MLX5_REG_NODE_DESC       = 0x6001,
        MLX5_REG_HOST_ENDIANNESS = 0x7004,
        MLX5_REG_MCIA            = 0x9014,
        MLX5_REG_MLCR            = 0x902b,
        MLX5_REG_MPCNT           = 0x9051,
};
enum mlx5_dcbx_oper_mode {
        MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
        MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
};
enum {
        MLX5_ATOMIC_OPS_CMP_SWAP        = 1 << 0,
        MLX5_ATOMIC_OPS_FETCH_ADD       = 1 << 1,
};
enum mlx5_page_fault_resume_flags {
        MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
        MLX5_PAGE_FAULT_RESUME_WRITE     = 1 << 1,
        MLX5_PAGE_FAULT_RESUME_RDMA      = 1 << 2,
        MLX5_PAGE_FAULT_RESUME_ERROR     = 1 << 7,
};
struct mlx5_field_desc {

struct mlx5_rsc_debug {
        struct mlx5_core_dev    *dev;
        enum dbg_rsc_type       type;
        struct mlx5_field_desc  fields[0];
};
enum mlx5_dev_event {
        MLX5_DEV_EVENT_SYS_ERROR,
        MLX5_DEV_EVENT_PORT_UP,
        MLX5_DEV_EVENT_PORT_DOWN,
        MLX5_DEV_EVENT_PORT_INITIALIZED,
        MLX5_DEV_EVENT_LID_CHANGE,
        MLX5_DEV_EVENT_PKEY_CHANGE,
        MLX5_DEV_EVENT_GUID_CHANGE,
        MLX5_DEV_EVENT_CLIENT_REREG,
};
enum mlx5_port_status {
struct mlx5_uuar_info {
        struct mlx5_uar         *uars;
        int                     num_low_latency_uuars;
        unsigned long           *bitmap;
        /* protect uuar allocation data structs */
};

struct mlx5_bf {
        void __iomem            *regreg;
        struct mlx5_uar         *uar;
        unsigned long           offset;
        /* protect blue flame buffer selection when needed */
        /* serialize 64 bit writes when done as two 32 bit accesses */
};
struct mlx5_cmd_first {

struct mlx5_cmd_msg {
        struct list_head        list;
        struct cmd_msg_cache    *parent;
        struct mlx5_cmd_first   first;
        struct mlx5_cmd_mailbox *next;
};
struct mlx5_cmd_debug {
        struct dentry           *dbg_root;
        struct dentry           *dbg_in;
        struct dentry           *dbg_out;
        struct dentry           *dbg_outlen;
        struct dentry           *dbg_status;
        struct dentry           *dbg_run;
};
struct cmd_msg_cache {
        /* protect block chain allocations */
        struct list_head        head;
        unsigned int            max_inbox_size;
        unsigned int            num_ent;
};

enum {
        MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
        struct dentry           *count;
        /* protect command average calculations */
};

struct mlx5_cmd {
        dma_addr_t              alloc_dma;

        /* protect command queue allocations */
        spinlock_t              alloc_lock;

        /* protect token allocations */
        spinlock_t              token_lock;
        unsigned long           bitmask;
        char                    wq_name[MLX5_CMD_WQ_MAX_NAME];
        struct workqueue_struct *wq;
        struct semaphore        sem;
        struct semaphore        pages_sem;
        struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
        struct pci_pool         *pool;
        struct mlx5_cmd_debug   dbg;
        struct cmd_msg_cache    cache[MLX5_NUM_COMMAND_CACHES];
        int                     checksum_disabled;
        struct mlx5_cmd_stats   stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {

struct mlx5_cmd_mailbox {
        struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {

struct mlx5_buf {
        struct mlx5_buf_list    direct;
};

struct mlx5_frag_buf {
        struct mlx5_buf_list    *frags;
};
struct mlx5_eq_tasklet {
        struct list_head        list;
        struct list_head        process_list;
        struct tasklet_struct   task;
        /* lock on completion tasklet list */
};

struct mlx5_eq {
        struct mlx5_core_dev    *dev;
        __be32 __iomem          *doorbell;
        struct list_head        list;
        struct mlx5_rsc_debug   *dbg;
        struct mlx5_eq_tasklet  tasklet_ctx;
};
struct mlx5_core_psv {

struct mlx5_core_sig_ctx {
        struct mlx5_core_psv    psv_memory;
        struct mlx5_core_psv    psv_wire;
        struct ib_sig_err       err_item;
        bool                    sig_status_checked;
};
struct mlx5_core_mkey {

enum mlx5_res_type {
        MLX5_RES_QP     = MLX5_EVENT_QUEUE_TYPE_QP,
        MLX5_RES_RQ     = MLX5_EVENT_QUEUE_TYPE_RQ,
        MLX5_RES_SQ     = MLX5_EVENT_QUEUE_TYPE_SQ,
};

struct mlx5_core_rsc_common {
        enum mlx5_res_type      res;
        struct completion       free;
};
struct mlx5_core_srq {
        struct mlx5_core_rsc_common     common; /* must be first */
        int                             max_avail_gather;
        void (*event)                   (struct mlx5_core_srq *, enum mlx5_event);
        struct completion               free;
};
struct mlx5_eq_table {
        void __iomem            *update_ci;
        void __iomem            *update_arm_ci;
        struct list_head        comp_eqs_list;
        struct mlx5_eq          pages_eq;
        struct mlx5_eq          async_eq;
        struct mlx5_eq          cmd_eq;
        int                     num_comp_vectors;
};

struct mlx5_uar {
        struct list_head        bf_list;
        unsigned                free_bf_bmap;
        void __iomem            *bf_map;
};
struct mlx5_core_health {
        struct health_buffer __iomem    *health;
        __be32 __iomem                  *health_counter;
        struct timer_list               timer;
        /* wq spinlock to synchronize draining */
        struct workqueue_struct         *wq;
        struct work_struct              work;
        struct delayed_work             recover_work;
};
struct mlx5_cq_table {
        /* protect radix tree */
        struct radix_tree_root  tree;
};

struct mlx5_qp_table {
        /* protect radix tree */
        struct radix_tree_root  tree;
};

struct mlx5_srq_table {
        /* protect radix tree */
        struct radix_tree_root  tree;
};

struct mlx5_mkey_table {
        /* protect radix tree */
        struct radix_tree_root  tree;
};
struct mlx5_vf_context {

struct mlx5_core_sriov {
        struct mlx5_vf_context  *vfs_ctx;
};

struct mlx5_irq_info {
        char name[MLX5_MAX_IRQ_NAME];
};
struct mlx5_fc_stats {
        struct rb_root          counters;
        struct list_head        addlist;
        /* protect addlist add/splice operations */
        spinlock_t              addlist_lock;

        struct workqueue_struct *wq;
        struct delayed_work     work;
        unsigned long           next_query;
};
struct mlx5_rl_entry {

struct mlx5_rl_table {
        /* protect rate limit table */
        struct mutex            rl_lock;
        struct mlx5_rl_entry    *rl_entry;
};
enum port_module_event_status_type {
        MLX5_MODULE_STATUS_PLUGGED   = 0x1,
        MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
        MLX5_MODULE_STATUS_ERROR     = 0x3,
        MLX5_MODULE_STATUS_NUM       = 0x3,
};
enum port_module_event_error_type {
        MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
        MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
        MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
        MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
        MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
        MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
        MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
        MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
        MLX5_MODULE_EVENT_ERROR_UNKNOWN,
        MLX5_MODULE_EVENT_ERROR_NUM,
};
struct mlx5_port_module_event_stats {
        u64 status_counters[MLX5_MODULE_STATUS_NUM];
        u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
struct mlx5_priv {
        char                    name[MLX5_MAX_NAME_LEN];
        struct mlx5_eq_table    eq_table;
        struct msix_entry       *msix_arr;
        struct mlx5_irq_info    *irq_info;
        struct mlx5_uuar_info   uuari;
        MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

        struct workqueue_struct *pg_wq;
        struct rb_root          page_root;
        struct list_head        free_list;

        struct mlx5_core_health health;

        struct mlx5_srq_table   srq_table;

        /* start: qp stuff */
        struct mlx5_qp_table    qp_table;
        struct dentry           *qp_debugfs;
        struct dentry           *eq_debugfs;
        struct dentry           *cq_debugfs;
        struct dentry           *cmdif_debugfs;

        /* start: cq stuff */
        struct mlx5_cq_table    cq_table;

        /* start: mkey stuff */
        struct mlx5_mkey_table  mkey_table;
        /* end: mkey stuff */

        /* start: alloc stuff */
        /* protect buffer allocation according to numa node */
        struct mutex            alloc_mutex;

        struct mutex            pgdir_mutex;
        struct list_head        pgdir_list;
        /* end: alloc stuff */
        struct dentry           *dbg_root;

        /* protect mkey key part */
        spinlock_t              mkey_lock;

        struct list_head        dev_list;
        struct list_head        ctx_list;

        struct mlx5_flow_steering *steering;
        struct mlx5_eswitch     *eswitch;
        struct mlx5_core_sriov  sriov;
        struct mlx5_lag         *lag;
        unsigned long           pci_dev_data;
        struct mlx5_fc_stats    fc_stats;
        struct mlx5_rl_table    rl_table;

        struct mlx5_port_module_event_stats pme_stats;
};
enum mlx5_device_state {
        MLX5_DEVICE_STATE_UP,
        MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
        MLX5_INTERFACE_STATE_DOWN = BIT(0),
        MLX5_INTERFACE_STATE_UP = BIT(1),
        MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
};

enum mlx5_pci_status {
        MLX5_PCI_STATUS_DISABLED,
        MLX5_PCI_STATUS_ENABLED,
};
struct mlx5_td {
        struct list_head        tirs_list;
};

struct mlx5e_resources {
        struct mlx5_uar         cq_uar;
        struct mlx5_core_mkey   mkey;
};
struct mlx5_core_dev {
        struct pci_dev          *pdev;
        struct mutex            pci_status_mutex;
        enum mlx5_pci_status    pci_status;
        char                    board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_port_caps   port_caps[MLX5_MAX_PORTS];
        u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        enum mlx5_device_state  state;
        /* sync interface state */
        struct mutex            intf_state_mutex;
        unsigned long           intf_state;
        void                    (*event) (struct mlx5_core_dev *dev,
                                          enum mlx5_dev_event event,
                                          unsigned long param);
        struct mlx5_priv        priv;
        struct mlx5_profile     *profile;
        struct mlx5e_resources  mlx5e_res;
#ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap         *rmap;
#endif
};
        struct mlx5_db_pgdir            *pgdir;
        struct mlx5_ib_user_db_page     *user_page;
enum {
        MLX5_COMP_EQ_SIZE = 1024,
};

enum {
        MLX5_PTYS_IB = 1 << 0,
        MLX5_PTYS_EN = 1 << 2,
};
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
        struct mlx5_cmd_msg     *in;
        struct mlx5_cmd_msg     *out;
        mlx5_cmd_cbk_t          callback;
        struct delayed_work     cb_timeout_work;
        struct completion       done;
        struct mlx5_cmd         *cmd;
        struct work_struct      work;
        struct mlx5_cmd_layout  *lay;
};
enum port_state_policy {
        MLX5_POLICY_DOWN        = 0,
        MLX5_POLICY_FOLLOW      = 2,
        MLX5_POLICY_INVALID     = 0xffffffff
};

enum phy_port_state {
struct mlx5_hca_vport_context {
        enum port_state_policy  policy;
        enum phy_port_state     phys_state;
        enum ib_port_state      vport_state;
        u8                      port_physical_state;
        u8                      init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
        u16                     qkey_violation_counter;
        u16                     pkey_violation_counter;
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
        return buf->direct.buf + offset;
}
extern struct workqueue_struct *mlx5_core_wq;

#define STRUCT_FIELD(header, field) \
        .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
        .struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
        return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
static inline void *mlx5_vzalloc(unsigned long size)
{
        void *rtn;

        rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!rtn)
                rtn = vzalloc(size);
        return rtn;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
        return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
                     void *out, int out_size, mlx5_cmd_cbk_t callback,
                     void *context);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
                       bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                        struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                                      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
                             struct mlx5_core_mkey *mkey,
                             u32 *in, int inlen,
                             u32 *out, int outlen,
                             mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
                          struct mlx5_core_mkey *mkey,
                          u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
                           struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
                         u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
                             u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
                      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
                         int size_in, void *data_out, int size_out,
                         u16 reg_num, int arg, int write);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
                       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
                         int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
                        struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
                             u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->initializing) >> 31;
}
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
        return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
        return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
        return mkey & 0xff;
}
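/*
 * Illustrative only: the helpers above assume a 32-bit mkey laid out as
 * key = (index << 8) | variant, so splitting and recombining a key looks
 * like this under that assumption:
 *
 *      u32 idx     = mlx5_mkey_to_idx(key);    // key >> 8
 *      u8  variant = mlx5_mkey_variant(key);   // key & 0xff
 *      u32 rebuilt = mlx5_idx_to_mkey(idx) | variant;
 *
 * Here rebuilt == key, and mlx5_base_mkey(key) == (key & 0xffffff00u).
 */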
enum {
        MLX5_PROF_MASK_QP_SIZE          = (u64)1 << 0,
        MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 1,
};

enum {
        MAX_MR_CACHE_ENTRIES    = 16,
};

enum {
        MLX5_INTERFACE_PROTOCOL_IB  = 0,
        MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
struct mlx5_interface {
        void *                  (*add)(struct mlx5_core_dev *dev);
        void                    (*remove)(struct mlx5_core_dev *dev, void *context);
        int                     (*attach)(struct mlx5_core_dev *dev, void *context);
        void                    (*detach)(struct mlx5_core_dev *dev, void *context);
        void                    (*event)(struct mlx5_core_dev *dev, void *context,
                                         enum mlx5_dev_event event, unsigned long param);
        void *                  (*get_dev)(void *context);
        struct list_head        list;
};
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_profile {
        struct {
        } mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
        MLX5_PCI_DEV_IS_VF      = 1 << 0,
};
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
        return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
        if (param > 4) {
                pr_warn("gid table length is zero\n");
                return 0;
        }

        return 8 * (1 << param);
}
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
        return !!(dev->priv.rl_table.max_size);
}
enum {
        MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
#endif /* MLX5_DRIVER_H */