/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_CORE_H__
#define __MLX5_CORE_H__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>

#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"

#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))

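/*
 * Illustrative sketch (not part of this header): MLX5_TOTAL_VPORTS() counts
 * the PF vport plus the maximum number of VFs reported by the PCI SR-IOV
 * capability, so a caller sizing a per-vport table might do something like
 * the following (struct foo and the variable names are hypothetical):
 *
 *	int total_vports = MLX5_TOTAL_VPORTS(dev);
 *	struct foo *table = kcalloc(total_vports, sizeof(*table), GFP_KERNEL);
 */
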
extern uint mlx5_core_debug_mask;

#define mlx5_core_dbg(__dev, format, ...)				\
	dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
		__func__, __LINE__, current->pid,			\
		##__VA_ARGS__)

#define mlx5_core_dbg_mask(__dev, mask, format, ...)			\
do {									\
	if ((mask) & mlx5_core_debug_mask)				\
		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);		\
} while (0)

#define mlx5_core_err(__dev, format, ...)				\
	dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
		__func__, __LINE__, current->pid,			\
		##__VA_ARGS__)

#define mlx5_core_warn(__dev, format, ...)				\
	dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
		 __func__, __LINE__, current->pid,			\
		 ##__VA_ARGS__)

#define mlx5_core_info(__dev, format, ...)				\
	dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)

enum {
	MLX5_CMD_DATA, /* print command payload only */
	MLX5_CMD_TIME, /* print command execution time */
};

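/*
 * Usage sketch (illustrative, not from this file): callers pass their
 * struct mlx5_core_dev and, for the masked variant, a bit built from the
 * MLX5_CMD_* enum above, e.g.
 *
 *	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, "dumping cmd payload\n");
 *	mlx5_core_err(dev, "cmd failed, err %d\n", err);
 *
 * The masked message is printed only when the corresponding bit is set in
 * mlx5_core_debug_mask.
 */
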
enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *context, u32 *element_id);
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *context, u32 element_id,
				       u32 modify_bitmask);
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
					u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
void mlx5_cq_tasklet_cb(unsigned long data);

int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
			u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
			u8 access_reg_group);

void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);

void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev);
bool mlx5_device_registered(struct mlx5_core_dev *dev);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
int mlx5_encap_alloc(struct mlx5_core_dev *dev,
		     int header_type,
		     size_t size,
		     void *encap_header,
		     u32 *encap_id);
void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);

int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
			     u8 namespace, u8 num_actions,
			     void *modify_actions, u32 *modify_header_id);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);

bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);

int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);

#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))

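/*
 * Illustrative sketch (not from this file): PPS users would test the combined
 * capability before touching the MTPPS/MTPPSE registers, e.g. (out being a
 * hypothetical MTPPS register buffer):
 *
 *	if (!MLX5_PPS_CAP(mdev))
 *		return;
 *	err = mlx5_query_mtpps(mdev, out, sizeof(out));
 */
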
int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);

void mlx5e_init(void);
void mlx5e_cleanup(void);

static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
{
	/* LACP owner conditions:
	 * 1) Function is physical.
	 * 2) LAG is supported by FW.
	 * 3) LAG is managed by driver (currently the only option).
	 */
	return MLX5_CAP_GEN(dev, vport_group_manager) &&
	       (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
	       MLX5_CAP_GEN(dev, lag_master);
}

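/*
 * Usage sketch (illustrative): a caller deciding whether this function may
 * drive LACP negotiation for a bond could gate on the helper above, e.g.
 *
 *	if (mlx5_lag_is_lacp_owner(dev))
 *		setup_lacp(dev);	(setup_lacp() is a hypothetical helper)
 */
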
int mlx5_lag_allow(struct mlx5_core_dev *dev);
int mlx5_lag_forbid(struct mlx5_core_dev *dev);

#endif /* __MLX5_CORE_H__ */