]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/net/ethernet/qlogic/qed/qed_sriov.h
qed: Add VF->PF channel infrastructure
[mirror_ubuntu-zesty-kernel.git] / drivers / net / ethernet / qlogic / qed / qed_sriov.h
CommitLineData
32a47e72
YM
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

/* Number of u64 words in the per-VF bitmaps (pending_events/pending_flr);
 * must be large enough for one bit per possible VF.
 */
#define QED_VF_ARRAY_LENGTH (3)

/* Is this device instance running as a virtual function? */
#define IS_VF(cdev)             ((cdev)->b_is_vf)
#define IS_PF(cdev)             (!((cdev)->b_is_vf))
#ifdef CONFIG_QED_SRIOV
/* PF whose device exposes SR-IOV (qed_hw_sriov_info was populated) */
#define IS_PF_SRIOV(p_hwfn)             (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn)             (0)
#endif
/* Per-hwfn SR-IOV bookkeeping (struct qed_pf_iov) has been allocated */
#define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
23
/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of vfs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	/* NOTE(review): presumably the absolute index of this PF's first VF
	 * within the device - confirm against qed_sriov.c.
	 */
	u32 first_vf_in_pf;
};
44
/* This mailbox is maintained per VF in its PF and contains all information
 * required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	/* PF-side buffers holding the VF's request and the PF's reply */
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* NOTE(review): presumably the current write cursor used while
	 * composing the TLV reply - confirm against the TLV helpers.
	 */
	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};
62
/* Lifecycle state of a single VF, as tracked by its PF */
enum vf_state {
	VF_STOPPED		/* VF, Stopped */
};
66
/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;	/* PF<->VF message channel */
	enum vf_state state;
	bool b_init;

	/* PF-side bulletin copy and its DMA address as mapped for the VF */
	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	u32 concrete_fid;
	u16 opaque_fid;

	u8 vport_id;
	u8 relative_vf_id;	/* index among this PF's VFs */
	u8 abs_vf_id;		/* index within the engine/path */
/* Device-wide absolute VF id: VFs on path 1 are offset past BB's VF range */
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)
};
86
/* This structure is part of qed_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];	/* per-VF state */

	/* Presumably bitmaps with one bit per VF (QED_VF_ARRAY_LENGTH u64
	 * words) - TODO confirm against the event/FLR handling code.
	 */
	u64 pending_events[QED_VF_ARRAY_LENGTH];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Allocate message address continuously and split to each VF */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;

	/* Single DMA region carved up into one bulletin board per VF */
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};
106
37bff2b9
YM
/* Work items deferred to the IOV workqueue (see qed_schedule_iov) */
enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
};
114
32a47e72
YM
#ifdef CONFIG_QED_SRIOV
/**
 * @brief - Given a VF index, return index of next [including that] active VF.
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

/**
 * @brief Read sriov related information and allocate resources -
 *        reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief qed_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param cdev
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
 *
 * @param p_hwfn
 * @param opcode
 * @param echo
 * @param data
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data);

/* Stop the IOV workqueue; when schedule_first is set, let already-queued
 * work run once before tearing the queue down.
 */
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

/* Mark 'flag' pending and kick the IOV workqueue for this hwfn */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
32a47e72
YM
#else
/* CONFIG_QED_SRIOV disabled: no-op stubs so callers need no #ifdefs */
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;	/* i.e. "no further active VFs" */
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

/* Without SR-IOV support no sriov EQE should ever arrive; reject it */
static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
				      u8 opcode,
				      __le16 echo, union event_ring_data *data)
{
	return -EINVAL;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}
#endif
232
/* Iterate _i over the relative ids of all active VFs of _p_hwfn */
#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
237
238#endif