/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};
enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};
enum {
	CMD_STATUS_SUCCESS	= 0,
};
enum mlx5_sqp_t {
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SYNC_UMR	= 4,
};
enum {
	MLX5_EQ_VEC_PAGES	= 0,
	MLX5_EQ_VEC_ASYNC	= 2,
	MLX5_EQ_VEC_COMP_BASE,
};
enum {
	MLX5_MAX_IRQ_NAME	= 32
};
enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};
enum {
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MLCR		 = 0x902b,
};
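/*
 * Example (sketch, not part of the original header): reading a port
 * register through the access-register interface declared further down
 * in this file. The pmtu_reg layout and the MLX5_SET()/MLX5_GET()/
 * MLX5_ST_SZ_DW() accessors are assumed to come from
 * <linux/mlx5/mlx5_ifc.h>; error handling is elided.
 *
 *	u32 in[MLX5_ST_SZ_DW(pmtu_reg)]  = {0};
 *	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
 *
 *	MLX5_SET(pmtu_reg, in, local_port, 1);
 *	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *				   MLX5_REG_PMTU, 0, 0);
 *	max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
 */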
enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
};
enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};
enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[0];
};
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};
enum mlx5_port_status {
	MLX5_PORT_UP	= 1,
	MLX5_PORT_DOWN	= 2,
};
struct mlx5_uuar_info {
	struct mlx5_uar	       *uars;
	int			num_uars;
	int			num_low_latency_uuars;
	unsigned long	       *bitmap;
	unsigned int	       *count;
	struct mlx5_bf	       *bfs;

	/*
	 * protect uuar allocation data structs
	 */
	struct mutex		lock;
	u32			ver;
};
struct mlx5_bf {
	void __iomem	       *reg;
	void __iomem	       *regreg;
	int			buf_size;
	struct mlx5_uar	       *uar;
	unsigned long		offset;
	int			need_lock;
	/* protect blue flame buffer selection when needed
	 */
	spinlock_t		lock;

	/* serialize 64 bit writes when done as two 32 bit accesses
	 */
	spinlock_t		lock32;
	int			bf_uses;
	int			qp_dumped;
};
struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head	list;
	struct cache_ent       *cache;
	u32			len;
	struct mlx5_cmd_first	first;
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
};
struct cache_ent {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
};
struct cmd_msg_cache {
	struct cache_ent	large;
	struct cache_ent	med;
};
struct mlx5_cmd_stats {
	u64			sum;
	u64			n;
	struct dentry	       *root;
	struct dentry	       *count;
	/* protect command average calculations */
	spinlock_t		lock;
};
struct mlx5_cmd {
	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int		mode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache;
	int		checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
};
struct mlx5_cmd_mailbox {
	void		       *buf;
	dma_addr_t		dma;
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_buf {
	struct mlx5_buf_list	direct;
	int			npages;
	int			size;
	u8			page_shift;
};
struct mlx5_eq_tasklet {
	struct list_head	list;
	struct list_head	process_list;
	struct tasklet_struct	task;
	/* lock on completion tasklet list */
	spinlock_t		lock;
};
struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	u32			cons_index;
	struct mlx5_buf		buf;
	int			size;
	unsigned int		irqn;
	u8			eqn;
	int			nent;
	u64			mask;
	struct list_head	list;
	int			index;
	struct mlx5_rsc_debug  *dbg;
	struct mlx5_eq_tasklet	tasklet_ctx;
};
struct mlx5_core_psv {
	u32			psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	dif_size;
	} psv;
};
struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};
struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
};
enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
};
struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_core_srq {
	struct mlx5_core_rsc_common	common; /* must be first */
	u32			srqn;
	int			max;
	int			max_gs;
	int			max_avail_gather;
	int			wqe_shift;
	void (*event)		(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head	comp_eqs_list;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
	int			num_comp_vectors;
	/* protect EQs list
	 */
	spinlock_t		lock;
};
struct mlx5_uar {
	u32			index;
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *bf_map;
	void __iomem	       *map;
};
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;
	int				miss_counter;
	bool				sick;
	struct workqueue_struct	       *wq;
	struct work_struct		work;
};
struct mlx5_cq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_qp_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_srq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_mkey_table {
	/* protect radix tree
	 */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_vf_context {
	int	enabled;
};
struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	int			num_vfs;
	int			enabled_vfs;
};
struct mlx5_irq_info {
	cpumask_var_t		mask;
	char			name[MLX5_MAX_IRQ_NAME];
};
struct mlx5_fc_stats {
	struct rb_root		counters;
	struct list_head	addlist;
	/* protect addlist add/splice operations */
	spinlock_t		addlist_lock;

	struct workqueue_struct *wq;
	struct delayed_work	work;
	unsigned long		next_query;
};
struct mlx5_rl_entry {
	u32			rate;
	u16			index;
	u16			refcount;
};
struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex		rl_lock;
	u16			max_size;
	u32			max_rate;
	u32			min_rate;
	struct mlx5_rl_entry   *rl_entry;
};
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct msix_entry      *msix_arr;
	struct mlx5_irq_info   *irq_info;
	struct mlx5_uuar_info	uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

	/* pages stuff */
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	int			fw_pages;
	atomic_t		reg_pages;
	struct list_head	free_list;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table	cq_table;
	/* end: cq stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;
	int			numa_node;

	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;

	struct list_head	dev_list;
	struct list_head	ctx_list;
	spinlock_t		ctx_lock;

	struct mlx5_flow_steering *steering;
	struct mlx5_eswitch    *eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag	       *lag;
	unsigned long		pci_dev_data;
	struct mlx5_fc_stats	fc_stats;
	struct mlx5_rl_table	rl_table;
};
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_DOWN	= BIT(0),
	MLX5_INTERFACE_STATE_UP		= BIT(1),
	MLX5_INTERFACE_STATE_SHUTDOWN	= BIT(2),
};
enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};
struct mlx5_td {
	struct list_head	tirs_list;
	u32			tdn;
};

struct mlx5e_resources {
	struct mlx5_uar		cq_uar;
	u32			pdn;
	struct mlx5_td		td;
	struct mlx5_core_mkey	mkey;
};
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  unsigned long param);
	struct mlx5_priv	priv;
	struct mlx5_profile    *profile;
	atomic_t		num_qps;
	u32			issi;
	struct mlx5e_resources	mlx5e_res;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap	       *rmap;
#endif
};
struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};
enum {
	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};
enum {
	MLX5_COMP_EQ_SIZE = 1024,
};
enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};
struct mlx5_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
	__be32		       *db_page;
	dma_addr_t		db_dma;
};
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
	struct completion	done;
	struct mlx5_cmd	       *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	int			ret;
	int			page_queue;
	u8			status;
	u8			token;
	u64			ts1;
	u64			ts2;
	u16			op;
};
enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};
enum phy_port_state {
	MLX5_AAA_111
};
struct mlx5_hca_vport_context {
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	return buf->direct.buf + offset;
}
extern struct workqueue_struct *mlx5_core_wq;
#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),	\
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
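/*
 * Example (sketch): STRUCT_FIELD() is a convenience initializer for
 * 'struct ib_field' tables consumed by ib_pack()/ib_unpack() (see
 * <rdma/ib_pack.h>). A table entry describing the LRH virtual lane
 * might look like the following; offsets/sizes are illustrative:
 *
 *	static const struct ib_field lrh_table[] = {
 *		{ STRUCT_FIELD(lrh, virtual_lane),
 *		  .offset_words = 0,
 *		  .offset_bits  = 0,
 *		  .size_bits    = 4 },
 *	};
 */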
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
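/*
 * Example (sketch): the firmware version is conventionally printed as
 * major.minor.subminor, e.g. from a probe path:
 *
 *	dev_info(&dev->pdev->dev, "firmware version: %d.%d.%d\n",
 *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 */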
static inline void *mlx5_vzalloc(unsigned long size)
{
	void *rtn;

	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!rtn)
		rtn = vzalloc(size);
	return rtn;
}
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
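/*
 * Example (sketch): issuing a command with mlx5_cmd_exec(). The in/out
 * buffers are laid out with the MLX5_SET()/MLX5_ST_SZ_DW() accessors
 * from <linux/mlx5/mlx5_ifc.h>; ENABLE_HCA is used purely as an
 * illustration and error handling is elided.
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */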
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
		       bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
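/*
 * Example (sketch): a typical buffer lifecycle. mlx5_fill_page_array()
 * (declared further down) copies the buffer's DMA addresses into a
 * firmware command buffer; 'pas' is assumed to point into such a
 * buffer, and error handling is elided.
 *
 *	struct mlx5_buf buf;
 *
 *	err = mlx5_buf_alloc(dev, size, &buf);
 *	...
 *	mlx5_fill_page_array(&buf, pas);
 *	...
 *	mlx5_buf_free(dev, &buf);
 */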
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
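/*
 * Example (sketch): protection-domain allocation is a thin wrapper
 * around the ALLOC_PD/DEALLOC_PD commands; error handling is elided.
 *
 *	u32 pdn;
 *
 *	err = mlx5_core_alloc_pd(dev, &pdn);
 *	...
 *	mlx5_core_dealloc_pd(dev, pdn);
 */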
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
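/*
 * Example (sketch): the rate-limit table refcounts each rate, so every
 * successful mlx5_rl_add_rate() should be balanced by a matching
 * mlx5_rl_remove_rate() for the same rate. 'rate_val' is a hypothetical
 * caller variable; error handling is elided.
 *
 *	u16 rl_index;
 *
 *	if (mlx5_rl_is_in_range(dev, rate_val)) {
 *		err = mlx5_rl_add_rate(dev, rate_val, &rl_index);
 *		...	// program rl_index into the send queue context
 *		mlx5_rl_remove_rate(dev, rate_val);
 *	}
 */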
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
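/*
 * Example (sketch): polling firmware initialization from a probe path,
 * loosely modeled on the driver's own wait loop; the timeout and sleep
 * interval names here are illustrative only, not defined by this
 * header.
 *
 *	unsigned long end = jiffies + msecs_to_jiffies(fw_init_timeout_ms);
 *
 *	while (fw_initializing(dev)) {
 *		if (time_after(jiffies, end))
 *			return -EBUSY;
 *		msleep(fw_init_wait_ms);
 *	}
 */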
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
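/*
 * Example (sketch): an mkey is composed of a 24-bit index and an 8-bit
 * variant, so the helpers above round-trip as follows:
 *
 *	u32 idx     = mlx5_mkey_to_idx(mkey);	// mkey >> 8
 *	u8  variant = mlx5_mkey_variant(mkey);	// mkey & 0xff
 *
 *	WARN_ON(mlx5_idx_to_mkey(idx) != mlx5_base_mkey(mkey));
 */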
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};
enum {
	MAX_MR_CACHE_ENTRIES	= 16,
};
enum {
	MLX5_INTERFACE_PROTOCOL_IB	= 0,
	MLX5_INTERFACE_PROTOCOL_ETH	= 1,
};
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	void			(*event)(struct mlx5_core_dev *dev, void *context,
					 enum mlx5_dev_event event, unsigned long param);
	void *			(*get_dev)(void *context);
	int			protocol;
	struct list_head	list;
};
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
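/*
 * Example (sketch): a minimal mlx5_interface client. The pointer
 * returned by .add is passed back as 'context' to the other callbacks;
 * the 'my_*' names are hypothetical and error handling is elided.
 *
 *	static void *my_add(struct mlx5_core_dev *dev)
 *	{
 *		return my_alloc_context(dev);
 *	}
 *
 *	static void my_remove(struct mlx5_core_dev *dev, void *context)
 *	{
 *		my_free_context(context);
 *	}
 *
 *	static struct mlx5_interface my_intf = {
 *		.add      = my_add,
 *		.remove   = my_remove,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	err = mlx5_register_interface(&my_intf);
 *	...
 *	mlx5_unregister_interface(&my_intf);
 */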
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};
enum {
	MLX5_PCI_DEV_IS_VF	= 1 << 0,
};
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}
enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
#endif /* MLX5_DRIVER_H */