Commit | Line | Data |
---|---|---|
32a47e72 | 1 | /* QLogic qed NIC Driver |
e8f1cb50 | 2 | * Copyright (c) 2015-2017 QLogic Corporation |
32a47e72 | 3 | * |
e8f1cb50 MY |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and /or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
32a47e72 YM |
31 | */ |
32 | ||
dacd88d6 | 33 | #include <linux/etherdevice.h> |
36558c3d | 34 | #include <linux/crc32.h> |
f29ffdb6 | 35 | #include <linux/vmalloc.h> |
0b55e27d | 36 | #include <linux/qed/qed_iov_if.h> |
1408cc1f YM |
37 | #include "qed_cxt.h" |
38 | #include "qed_hsi.h" | |
32a47e72 | 39 | #include "qed_hw.h" |
1408cc1f | 40 | #include "qed_init_ops.h" |
32a47e72 | 41 | #include "qed_int.h" |
1408cc1f | 42 | #include "qed_mcp.h" |
32a47e72 | 43 | #include "qed_reg_addr.h" |
1408cc1f | 44 | #include "qed_sp.h" |
32a47e72 YM |
45 | #include "qed_sriov.h" |
46 | #include "qed_vf.h" | |
47 | ||
1408cc1f | 48 | /* IOV ramrods */ |
1fe614d1 | 49 | static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) |
1408cc1f YM |
50 | { |
51 | struct vf_start_ramrod_data *p_ramrod = NULL; | |
52 | struct qed_spq_entry *p_ent = NULL; | |
53 | struct qed_sp_init_data init_data; | |
54 | int rc = -EINVAL; | |
1fe614d1 | 55 | u8 fp_minor; |
1408cc1f YM |
56 | |
57 | /* Get SPQ entry */ | |
58 | memset(&init_data, 0, sizeof(init_data)); | |
59 | init_data.cid = qed_spq_get_cid(p_hwfn); | |
1fe614d1 | 60 | init_data.opaque_fid = p_vf->opaque_fid; |
1408cc1f YM |
61 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; |
62 | ||
63 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
64 | COMMON_RAMROD_VF_START, | |
65 | PROTOCOLID_COMMON, &init_data); | |
66 | if (rc) | |
67 | return rc; | |
68 | ||
69 | p_ramrod = &p_ent->ramrod.vf_start; | |
70 | ||
1fe614d1 YM |
71 | p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); |
72 | p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); | |
73 | ||
74 | switch (p_hwfn->hw_info.personality) { | |
75 | case QED_PCI_ETH: | |
76 | p_ramrod->personality = PERSONALITY_ETH; | |
77 | break; | |
78 | case QED_PCI_ETH_ROCE: | |
79 | p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; | |
80 | break; | |
81 | default: | |
82 | DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", | |
83 | p_hwfn->hw_info.personality); | |
84 | return -EINVAL; | |
85 | } | |
86 | ||
87 | fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; | |
a044df83 YM |
88 | if (fp_minor > ETH_HSI_VER_MINOR && |
89 | fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { | |
1fe614d1 YM |
90 | DP_VERBOSE(p_hwfn, |
91 | QED_MSG_IOV, | |
92 | "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", | |
93 | p_vf->abs_vf_id, | |
94 | ETH_HSI_VER_MAJOR, | |
95 | fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); | |
96 | fp_minor = ETH_HSI_VER_MINOR; | |
97 | } | |
1408cc1f | 98 | |
351a4ded | 99 | p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; |
1fe614d1 YM |
100 | p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; |
101 | ||
102 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
103 | "VF[%d] - Starting using HSI %02x.%02x\n", | |
104 | p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); | |
1408cc1f YM |
105 | |
106 | return qed_spq_post(p_hwfn, p_ent, NULL); | |
107 | } | |
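For instance (the version numbers here are purely illustrative, not the actual ETH_HSI values): if the PF was built against HSI 3.10 and a VF asks for 3.12, the fp_minor check in qed_sp_vf_start() above clamps the ramrod to 3.10 and logs the mismatch; a request equal to ETH_HSI_VER_NO_PKT_LEN_TUNN is deliberately left untouched, since that value marks a legacy VF rather than a genuinely newer minor.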
108 | ||
0b55e27d YM |
109 | static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, |
110 | u32 concrete_vfid, u16 opaque_vfid) | |
111 | { | |
112 | struct vf_stop_ramrod_data *p_ramrod = NULL; | |
113 | struct qed_spq_entry *p_ent = NULL; | |
114 | struct qed_sp_init_data init_data; | |
115 | int rc = -EINVAL; | |
116 | ||
117 | /* Get SPQ entry */ | |
118 | memset(&init_data, 0, sizeof(init_data)); | |
119 | init_data.cid = qed_spq_get_cid(p_hwfn); | |
120 | init_data.opaque_fid = opaque_vfid; | |
121 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
122 | ||
123 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
124 | COMMON_RAMROD_VF_STOP, | |
125 | PROTOCOLID_COMMON, &init_data); | |
126 | if (rc) | |
127 | return rc; | |
128 | ||
129 | p_ramrod = &p_ent->ramrod.vf_stop; | |
130 | ||
131 | p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); | |
132 | ||
133 | return qed_spq_post(p_hwfn, p_ent, NULL); | |
134 | } | |
135 | ||
ba56947a | 136 | static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, |
7eff82b0 YM |
137 | int rel_vf_id, |
138 | bool b_enabled_only, bool b_non_malicious) | |
32a47e72 YM |
139 | { |
140 | if (!p_hwfn->pf_iov_info) { | |
141 | DP_NOTICE(p_hwfn->cdev, "No iov info\n"); | |
142 | return false; | |
143 | } | |
144 | ||
145 | if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || | |
146 | (rel_vf_id < 0)) | |
147 | return false; | |
148 | ||
149 | if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && | |
150 | b_enabled_only) | |
151 | return false; | |
152 | ||
7eff82b0 YM |
153 | if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && |
154 | b_non_malicious) | |
155 | return false; | |
156 | ||
32a47e72 YM |
157 | return true; |
158 | } | |
159 | ||
37bff2b9 YM |
160 | static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, |
161 | u16 relative_vf_id, | |
162 | bool b_enabled_only) | |
163 | { | |
164 | struct qed_vf_info *vf = NULL; | |
165 | ||
166 | if (!p_hwfn->pf_iov_info) { | |
167 | DP_NOTICE(p_hwfn->cdev, "No iov info\n"); | |
168 | return NULL; | |
169 | } | |
170 | ||
7eff82b0 YM |
171 | if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, |
172 | b_enabled_only, false)) | |
37bff2b9 YM |
173 | vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; |
174 | else | |
175 | DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", | |
176 | relative_vf_id); | |
177 | ||
178 | return vf; | |
179 | } | |
180 | ||
f109c240 MY |
181 | enum qed_iov_validate_q_mode { |
182 | QED_IOV_VALIDATE_Q_NA, | |
183 | QED_IOV_VALIDATE_Q_ENABLE, | |
184 | QED_IOV_VALIDATE_Q_DISABLE, | |
185 | }; | |
186 | ||
187 | static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn, | |
188 | struct qed_vf_info *p_vf, | |
189 | u16 qid, | |
190 | enum qed_iov_validate_q_mode mode, | |
191 | bool b_is_tx) | |
192 | { | |
193 | if (mode == QED_IOV_VALIDATE_Q_NA) | |
194 | return true; | |
195 | ||
196 | if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) || | |
197 | (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid)) | |
198 | return mode == QED_IOV_VALIDATE_Q_ENABLE; | |
199 | ||
200 | /* In case we haven't found any valid cid, then it's disabled */
201 | return mode == QED_IOV_VALIDATE_Q_DISABLE; | |
202 | } | |
203 | ||
41086467 | 204 | static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, |
f109c240 MY |
205 | struct qed_vf_info *p_vf, |
206 | u16 rx_qid, | |
207 | enum qed_iov_validate_q_mode mode) | |
41086467 | 208 | { |
f109c240 | 209 | if (rx_qid >= p_vf->num_rxqs) { |
41086467 YM |
210 | DP_VERBOSE(p_hwfn, |
211 | QED_MSG_IOV, | |
212 | "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", | |
213 | p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); | |
f109c240 MY |
214 | return false; |
215 | } | |
216 | ||
217 | return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false); | |
41086467 YM |
218 | } |
219 | ||
220 | static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, | |
f109c240 MY |
221 | struct qed_vf_info *p_vf, |
222 | u16 tx_qid, | |
223 | enum qed_iov_validate_q_mode mode) | |
41086467 | 224 | { |
f109c240 | 225 | if (tx_qid >= p_vf->num_txqs) { |
41086467 YM |
226 | DP_VERBOSE(p_hwfn, |
227 | QED_MSG_IOV, | |
228 | "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", | |
229 | p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); | |
f109c240 MY |
230 | return false; |
231 | } | |
232 | ||
233 | return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true); | |
41086467 YM |
234 | } |
235 | ||
236 | static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, | |
237 | struct qed_vf_info *p_vf, u16 sb_idx) | |
238 | { | |
239 | int i; | |
240 | ||
241 | for (i = 0; i < p_vf->num_sbs; i++) | |
242 | if (p_vf->igu_sbs[i] == sb_idx) | |
243 | return true; | |
244 | ||
245 | DP_VERBOSE(p_hwfn, | |
246 | QED_MSG_IOV, | |
247 | "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", | |
248 | p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); | |
249 | ||
250 | return false; | |
251 | } | |
252 | ||
f109c240 MY |
253 | static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn, |
254 | struct qed_vf_info *p_vf) | |
255 | { | |
256 | u8 i; | |
257 | ||
258 | for (i = 0; i < p_vf->num_rxqs; i++) | |
259 | if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, | |
260 | QED_IOV_VALIDATE_Q_ENABLE, | |
261 | false)) | |
262 | return true; | |
263 | ||
264 | return false; | |
265 | } | |
266 | ||
267 | static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn, | |
268 | struct qed_vf_info *p_vf) | |
269 | { | |
270 | u8 i; | |
271 | ||
272 | for (i = 0; i < p_vf->num_txqs; i++) | |
273 | if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, | |
274 | QED_IOV_VALIDATE_Q_ENABLE, | |
275 | true)) | |
276 | return true; | |
277 | ||
278 | return false; | |
279 | } | |
280 | ||
ba56947a BX |
281 | static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, |
282 | int vfid, struct qed_ptt *p_ptt) | |
36558c3d YM |
283 | { |
284 | struct qed_bulletin_content *p_bulletin; | |
285 | int crc_size = sizeof(p_bulletin->crc); | |
286 | struct qed_dmae_params params; | |
287 | struct qed_vf_info *p_vf; | |
288 | ||
289 | p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
290 | if (!p_vf) | |
291 | return -EINVAL; | |
292 | ||
293 | if (!p_vf->vf_bulletin) | |
294 | return -EINVAL; | |
295 | ||
296 | p_bulletin = p_vf->bulletin.p_virt; | |
297 | ||
298 | /* Increment bulletin board version and compute crc */ | |
299 | p_bulletin->version++; | |
300 | p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, | |
301 | p_vf->bulletin.size - crc_size); | |
302 | ||
303 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
304 | "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", | |
305 | p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); | |
306 | ||
307 | /* propagate bulletin board via dmae to vm memory */ | |
308 | memset(¶ms, 0, sizeof(params)); | |
309 | params.flags = QED_DMAE_FLAG_VF_DST; | |
310 | params.dst_vfid = p_vf->abs_vf_id; | |
311 | return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, | |
312 | p_vf->vf_bulletin, p_vf->bulletin.size / 4, | |
313 | ¶ms); | |
314 | } | |
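The CRC above deliberately skips the crc field itself: it is seeded with 0 and runs from the first byte after the crc to the end of the board, so the consumer can redo the exact same computation on its copy. A minimal sketch of that check (illustrative only, not the driver's actual VF-side validation routine; it assumes the crc sits at the start of struct qed_bulletin_content, as the arithmetic above implies):

        static bool qed_bulletin_crc_matches(struct qed_bulletin_content *p_bulletin,
                                             u32 size)
        {
                int crc_size = sizeof(p_bulletin->crc);

                /* Same computation as qed_iov_post_vf_bulletin() */
                return p_bulletin->crc == crc32(0, (u8 *)p_bulletin + crc_size,
                                                size - crc_size);
        }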
315 | ||
32a47e72 YM |
316 | static int qed_iov_pci_cfg_info(struct qed_dev *cdev) |
317 | { | |
318 | struct qed_hw_sriov_info *iov = cdev->p_iov_info; | |
319 | int pos = iov->pos; | |
320 | ||
321 | DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); | |
322 | pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); | |
323 | ||
324 | pci_read_config_word(cdev->pdev, | |
325 | pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); | |
326 | pci_read_config_word(cdev->pdev, | |
327 | pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); | |
328 | ||
329 | pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); | |
330 | if (iov->num_vfs) { | |
331 | DP_VERBOSE(cdev, | |
332 | QED_MSG_IOV, | |
333 | "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n"); | |
334 | iov->num_vfs = 0; | |
335 | } | |
336 | ||
337 | pci_read_config_word(cdev->pdev, | |
338 | pos + PCI_SRIOV_VF_OFFSET, &iov->offset); | |
339 | ||
340 | pci_read_config_word(cdev->pdev, | |
341 | pos + PCI_SRIOV_VF_STRIDE, &iov->stride); | |
342 | ||
343 | pci_read_config_word(cdev->pdev, | |
344 | pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); | |
345 | ||
346 | pci_read_config_dword(cdev->pdev, | |
347 | pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); | |
348 | ||
349 | pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); | |
350 | ||
351 | pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); | |
352 | ||
353 | DP_VERBOSE(cdev, | |
354 | QED_MSG_IOV, | |
355 | "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", | |
356 | iov->nres, | |
357 | iov->cap, | |
358 | iov->ctrl, | |
359 | iov->total_vfs, | |
360 | iov->initial_vfs, | |
361 | iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); | |
362 | ||
363 | /* Some sanity checks */ | |
364 | if (iov->num_vfs > NUM_OF_VFS(cdev) || | |
365 | iov->total_vfs > NUM_OF_VFS(cdev)) { | |
366 | /* This can happen only due to a bug. In this case we set | |
367 | * num_vfs to zero to avoid memory corruption in the code that | |
368 | * assumes max number of vfs | |
369 | */ | |
370 | DP_NOTICE(cdev, | |
371 | "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", | |
372 | iov->num_vfs); | |
373 | ||
374 | iov->num_vfs = 0; | |
375 | iov->total_vfs = 0; | |
376 | } | |
377 | ||
378 | return 0; | |
379 | } | |
380 | ||
32a47e72 YM |
381 | static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) |
382 | { | |
383 | struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; | |
384 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; | |
385 | struct qed_bulletin_content *p_bulletin_virt; | |
386 | dma_addr_t req_p, rply_p, bulletin_p; | |
387 | union pfvf_tlvs *p_reply_virt_addr; | |
388 | union vfpf_tlvs *p_req_virt_addr; | |
389 | u8 idx = 0; | |
390 | ||
391 | memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); | |
392 | ||
393 | p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; | |
394 | req_p = p_iov_info->mbx_msg_phys_addr; | |
395 | p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; | |
396 | rply_p = p_iov_info->mbx_reply_phys_addr; | |
397 | p_bulletin_virt = p_iov_info->p_bulletins; | |
398 | bulletin_p = p_iov_info->bulletins_phys; | |
399 | if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { | |
400 | DP_ERR(p_hwfn, | |
401 | "qed_iov_setup_vfdb called without allocating mem first\n"); | |
402 | return; | |
403 | } | |
404 | ||
405 | for (idx = 0; idx < p_iov->total_vfs; idx++) { | |
406 | struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; | |
407 | u32 concrete; | |
408 | ||
409 | vf->vf_mbx.req_virt = p_req_virt_addr + idx; | |
410 | vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); | |
411 | vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; | |
412 | vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); | |
413 | ||
414 | vf->state = VF_STOPPED; | |
415 | vf->b_init = false; | |
416 | ||
417 | vf->bulletin.phys = idx * | |
418 | sizeof(struct qed_bulletin_content) + | |
419 | bulletin_p; | |
420 | vf->bulletin.p_virt = p_bulletin_virt + idx; | |
421 | vf->bulletin.size = sizeof(struct qed_bulletin_content); | |
422 | ||
423 | vf->relative_vf_id = idx; | |
424 | vf->abs_vf_id = idx + p_iov->first_vf_in_pf; | |
425 | concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); | |
426 | vf->concrete_fid = concrete; | |
427 | vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | | |
428 | (vf->abs_vf_id << 8); | |
429 | vf->vport_id = idx + 1; | |
1cf2b1a9 YM |
430 | |
431 | vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; | |
432 | vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; | |
32a47e72 YM |
433 | } |
434 | } | |
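To make the FID encoding above concrete (the numbers are made up for illustration): if the PF's opaque_fid has low byte 0x02 and the VF's absolute index is 5, the VF's opaque_fid becomes (0x02 | (5 << 8)) = 0x0502, i.e. the PF identity stays in the low byte and the absolute VF id rides in the high byte. Likewise, each VF's mailbox request/reply pointers and bulletin board are fixed-size slices of the three coherent buffers allocated in qed_iov_allocate_vfdb(), offset by the relative VF index.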
435 | ||
436 | static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) | |
437 | { | |
438 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; | |
439 | void **p_v_addr; | |
440 | u16 num_vfs = 0; | |
441 | ||
442 | num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; | |
443 | ||
444 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
445 | "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); | |
446 | ||
447 | /* Allocate PF Mailbox buffer (per-VF) */ | |
448 | p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; | |
449 | p_v_addr = &p_iov_info->mbx_msg_virt_addr; | |
450 | *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
451 | p_iov_info->mbx_msg_size, | |
452 | &p_iov_info->mbx_msg_phys_addr, | |
453 | GFP_KERNEL); | |
454 | if (!*p_v_addr) | |
455 | return -ENOMEM; | |
456 | ||
457 | /* Allocate PF Mailbox Reply buffer (per-VF) */ | |
458 | p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; | |
459 | p_v_addr = &p_iov_info->mbx_reply_virt_addr; | |
460 | *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
461 | p_iov_info->mbx_reply_size, | |
462 | &p_iov_info->mbx_reply_phys_addr, | |
463 | GFP_KERNEL); | |
464 | if (!*p_v_addr) | |
465 | return -ENOMEM; | |
466 | ||
467 | p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * | |
468 | num_vfs; | |
469 | p_v_addr = &p_iov_info->p_bulletins; | |
470 | *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
471 | p_iov_info->bulletins_size, | |
472 | &p_iov_info->bulletins_phys, | |
473 | GFP_KERNEL); | |
474 | if (!*p_v_addr) | |
475 | return -ENOMEM; | |
476 | ||
477 | DP_VERBOSE(p_hwfn, | |
478 | QED_MSG_IOV, | |
479 | "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", | |
480 | p_iov_info->mbx_msg_virt_addr, | |
481 | (u64) p_iov_info->mbx_msg_phys_addr, | |
482 | p_iov_info->mbx_reply_virt_addr, | |
483 | (u64) p_iov_info->mbx_reply_phys_addr, | |
484 | p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); | |
485 | ||
486 | return 0; | |
487 | } | |
488 | ||
489 | static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) | |
490 | { | |
491 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; | |
492 | ||
493 | if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) | |
494 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
495 | p_iov_info->mbx_msg_size, | |
496 | p_iov_info->mbx_msg_virt_addr, | |
497 | p_iov_info->mbx_msg_phys_addr); | |
498 | ||
499 | if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) | |
500 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
501 | p_iov_info->mbx_reply_size, | |
502 | p_iov_info->mbx_reply_virt_addr, | |
503 | p_iov_info->mbx_reply_phys_addr); | |
504 | ||
505 | if (p_iov_info->p_bulletins) | |
506 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
507 | p_iov_info->bulletins_size, | |
508 | p_iov_info->p_bulletins, | |
509 | p_iov_info->bulletins_phys); | |
510 | } | |
511 | ||
512 | int qed_iov_alloc(struct qed_hwfn *p_hwfn) | |
513 | { | |
514 | struct qed_pf_iov *p_sriov; | |
515 | ||
516 | if (!IS_PF_SRIOV(p_hwfn)) { | |
517 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
518 | "No SR-IOV - no need for IOV db\n"); | |
519 | return 0; | |
520 | } | |
521 | ||
522 | p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); | |
2591c280 | 523 | if (!p_sriov) |
32a47e72 | 524 | return -ENOMEM; |
32a47e72 YM |
525 | |
526 | p_hwfn->pf_iov_info = p_sriov; | |
527 | ||
528 | return qed_iov_allocate_vfdb(p_hwfn); | |
529 | } | |
530 | ||
1ee240e3 | 531 | void qed_iov_setup(struct qed_hwfn *p_hwfn) |
32a47e72 YM |
532 | { |
533 | if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) | |
534 | return; | |
535 | ||
536 | qed_iov_setup_vfdb(p_hwfn); | |
32a47e72 YM |
537 | } |
538 | ||
539 | void qed_iov_free(struct qed_hwfn *p_hwfn) | |
540 | { | |
541 | if (IS_PF_SRIOV_ALLOC(p_hwfn)) { | |
542 | qed_iov_free_vfdb(p_hwfn); | |
543 | kfree(p_hwfn->pf_iov_info); | |
544 | } | |
545 | } | |
546 | ||
547 | void qed_iov_free_hw_info(struct qed_dev *cdev) | |
548 | { | |
549 | kfree(cdev->p_iov_info); | |
550 | cdev->p_iov_info = NULL; | |
551 | } | |
552 | ||
553 | int qed_iov_hw_info(struct qed_hwfn *p_hwfn) | |
554 | { | |
555 | struct qed_dev *cdev = p_hwfn->cdev; | |
556 | int pos; | |
557 | int rc; | |
558 | ||
1408cc1f YM |
559 | if (IS_VF(p_hwfn->cdev)) |
560 | return 0; | |
561 | ||
32a47e72 YM |
562 | /* Learn the PCI configuration */ |
563 | pos = pci_find_ext_capability(p_hwfn->cdev->pdev, | |
564 | PCI_EXT_CAP_ID_SRIOV); | |
565 | if (!pos) { | |
566 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); | |
567 | return 0; | |
568 | } | |
569 | ||
570 | /* Allocate a new struct for IOV information */ | |
571 | cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); | |
2591c280 | 572 | if (!cdev->p_iov_info) |
32a47e72 | 573 | return -ENOMEM; |
2591c280 | 574 | |
32a47e72 YM |
575 | cdev->p_iov_info->pos = pos; |
576 | ||
577 | rc = qed_iov_pci_cfg_info(cdev); | |
578 | if (rc) | |
579 | return rc; | |
580 | ||
581 | /* We want PF IOV to be synonymous with the existence of p_iov_info;
582 | * In case the capability is published but there are no VFs, simply | |
583 | * de-allocate the struct. | |
584 | */ | |
585 | if (!cdev->p_iov_info->total_vfs) { | |
586 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
587 | "IOV capabilities, but no VFs are published\n"); | |
588 | kfree(cdev->p_iov_info); | |
589 | cdev->p_iov_info = NULL; | |
590 | return 0; | |
591 | } | |
592 | ||
9c79ddaa MY |
593 | /* First VF index based on offset is tricky: |
594 | * - If ARI is supported [likely], offset - (16 - pf_id) would | |
595 | * provide the number for eng0. 2nd engine VFs would begin
596 | * after the first engine's VFs. | |
597 | * - If !ARI, VFs would start on next device. | |
598 | * so offset - (256 - pf_id) would provide the number. | |
599 | * Utilize the fact that (256 - pf_id) is reached only in the latter case
8ac1ed79 | 600 | * to differentiate between the two. |
32a47e72 | 601 | */ |
9c79ddaa MY |
602 | |
603 | if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { | |
604 | u32 first = p_hwfn->cdev->p_iov_info->offset + | |
605 | p_hwfn->abs_pf_id - 16; | |
606 | ||
607 | cdev->p_iov_info->first_vf_in_pf = first; | |
608 | ||
609 | if (QED_PATH_ID(p_hwfn)) | |
610 | cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; | |
611 | } else { | |
612 | u32 first = p_hwfn->cdev->p_iov_info->offset + | |
613 | p_hwfn->abs_pf_id - 256; | |
614 | ||
615 | cdev->p_iov_info->first_vf_in_pf = first; | |
616 | } | |
32a47e72 YM |
617 | |
618 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
619 | "First VF in hwfn 0x%08x\n", | |
620 | cdev->p_iov_info->first_vf_in_pf); | |
621 | ||
622 | return 0; | |
623 | } | |
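The offset arithmetic above is easier to see with numbers. A self-contained sketch that mirrors the branch in qed_iov_hw_info() (the helper name and the example offsets in the comments are hypothetical, chosen only to illustrate the two cases):

        /* Illustrative only - mirrors the first_vf_in_pf computation above. */
        static u32 qed_first_vf_from_offset(u32 offset, u32 abs_pf_id,
                                            bool second_engine)
        {
                u32 first;

                if (offset < (256 - abs_pf_id)) {
                        /* ARI: e.g. offset 16, abs_pf_id 2 -> first VF 2 */
                        first = offset + abs_pf_id - 16;
                        if (second_engine)
                                first -= MAX_NUM_VFS_BB;
                } else {
                        /* !ARI: e.g. offset 254, abs_pf_id 2 -> first VF 0 */
                        first = offset + abs_pf_id - 256;
                }

                return first;
        }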
624 | ||
7eff82b0 YM |
625 | bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, |
626 | int vfid, bool b_fail_malicious) | |
37bff2b9 YM |
627 | { |
628 | /* Check that the PF supports SR-IOV */
b0409fa0 YM |
629 | if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || |
630 | !IS_PF_SRIOV_ALLOC(p_hwfn)) | |
37bff2b9 YM |
631 | return false; |
632 | ||
633 | /* Check VF validity */ | |
7eff82b0 | 634 | if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) |
37bff2b9 YM |
635 | return false; |
636 | ||
637 | return true; | |
638 | } | |
639 | ||
7eff82b0 YM |
640 | bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) |
641 | { | |
642 | return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); | |
643 | } | |
644 | ||
0b55e27d YM |
645 | static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, |
646 | u16 rel_vf_id, u8 to_disable) | |
647 | { | |
648 | struct qed_vf_info *vf; | |
649 | int i; | |
650 | ||
651 | for_each_hwfn(cdev, i) { | |
652 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | |
653 | ||
654 | vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); | |
655 | if (!vf) | |
656 | continue; | |
657 | ||
658 | vf->to_disable = to_disable; | |
659 | } | |
660 | } | |
661 | ||
ba56947a | 662 | static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) |
0b55e27d YM |
663 | { |
664 | u16 i; | |
665 | ||
666 | if (!IS_QED_SRIOV(cdev)) | |
667 | return; | |
668 | ||
669 | for (i = 0; i < cdev->p_iov_info->total_vfs; i++) | |
670 | qed_iov_set_vf_to_disable(cdev, i, to_disable); | |
671 | } | |
672 | ||
1408cc1f YM |
673 | static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, |
674 | struct qed_ptt *p_ptt, u8 abs_vfid) | |
675 | { | |
676 | qed_wr(p_hwfn, p_ptt, | |
677 | PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, | |
678 | 1 << (abs_vfid & 0x1f)); | |
679 | } | |
680 | ||
dacd88d6 YM |
681 | static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, |
682 | struct qed_ptt *p_ptt, struct qed_vf_info *vf) | |
683 | { | |
dacd88d6 YM |
684 | int i; |
685 | ||
686 | /* Set VF masks and configuration - pretend */ | |
687 | qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); | |
688 | ||
689 | qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); | |
690 | ||
dacd88d6 YM |
691 | /* unpretend */ |
692 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); | |
693 | ||
694 | /* iterate over all queues, clear sb consumer */ | |
b2b897eb YM |
695 | for (i = 0; i < vf->num_sbs; i++) |
696 | qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, | |
697 | vf->igu_sbs[i], | |
698 | vf->opaque_fid, true); | |
dacd88d6 YM |
699 | } |
700 | ||
0b55e27d YM |
701 | static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, |
702 | struct qed_ptt *p_ptt, | |
703 | struct qed_vf_info *vf, bool enable) | |
704 | { | |
705 | u32 igu_vf_conf; | |
706 | ||
707 | qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); | |
708 | ||
709 | igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); | |
710 | ||
711 | if (enable) | |
712 | igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; | |
713 | else | |
714 | igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; | |
715 | ||
716 | qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); | |
717 | ||
718 | /* unpretend */ | |
719 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); | |
720 | } | |
721 | ||
88072fd4 MY |
722 | static int |
723 | qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn, | |
724 | struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs) | |
725 | { | |
726 | u8 current_max = 0; | |
727 | int i; | |
728 | ||
729 | /* For AH onward, configuration is per-PF. Find maximum of all | |
730 | * the currently enabled child VFs, and set the number to be that. | |
731 | */ | |
732 | if (!QED_IS_BB(p_hwfn->cdev)) { | |
733 | qed_for_each_vf(p_hwfn, i) { | |
734 | struct qed_vf_info *p_vf; | |
735 | ||
736 | p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true); | |
737 | if (!p_vf) | |
738 | continue; | |
739 | ||
740 | current_max = max_t(u8, current_max, p_vf->num_sbs); | |
741 | } | |
742 | } | |
743 | ||
744 | if (num_sbs > current_max) | |
745 | return qed_mcp_config_vf_msix(p_hwfn, p_ptt, | |
746 | abs_vf_id, num_sbs); | |
747 | ||
748 | return 0; | |
749 | } | |
750 | ||
1408cc1f YM |
751 | static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, |
752 | struct qed_ptt *p_ptt, | |
753 | struct qed_vf_info *vf) | |
754 | { | |
755 | u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; | |
756 | int rc; | |
757 | ||
4e9b2a67 MY |
758 | /* It's possible VF was previously considered malicious - |
759 | * clear the indication even if we're only going to disable VF. | |
760 | */ | |
761 | vf->b_malicious = false; | |
762 | ||
0b55e27d YM |
763 | if (vf->to_disable) |
764 | return 0; | |
765 | ||
1408cc1f YM |
766 | DP_VERBOSE(p_hwfn, |
767 | QED_MSG_IOV, | |
768 | "Enable internal access for vf %x [abs %x]\n", | |
769 | vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); | |
770 | ||
771 | qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); | |
772 | ||
b2b897eb YM |
773 | qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); |
774 | ||
88072fd4 MY |
775 | rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt, |
776 | vf->abs_vf_id, vf->num_sbs); | |
1408cc1f YM |
777 | if (rc) |
778 | return rc; | |
779 | ||
780 | qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); | |
781 | ||
782 | SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); | |
783 | STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); | |
784 | ||
785 | qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, | |
786 | p_hwfn->hw_info.hw_mode); | |
787 | ||
788 | /* unpretend */ | |
789 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); | |
790 | ||
1408cc1f YM |
791 | vf->state = VF_FREE; |
792 | ||
793 | return rc; | |
794 | } | |
795 | ||
0b55e27d YM |
796 | /** |
797 | * @brief qed_iov_config_perm_table - configure the permission | |
798 | * zone table. | |
799 | * In E4, queue zone permission table size is 320x9. There | |
800 | * are 320 VF queues for single engine device (256 for dual | |
801 | * engine device), and each entry has the following format: | |
802 | * {Valid, VF[7:0]} | |
803 | * @param p_hwfn | |
804 | * @param p_ptt | |
805 | * @param vf | |
806 | * @param enable | |
807 | */ | |
808 | static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, | |
809 | struct qed_ptt *p_ptt, | |
810 | struct qed_vf_info *vf, u8 enable) | |
811 | { | |
812 | u32 reg_addr, val; | |
813 | u16 qzone_id = 0; | |
814 | int qid; | |
815 | ||
816 | for (qid = 0; qid < vf->num_rxqs; qid++) { | |
817 | qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, | |
818 | &qzone_id); | |
819 | ||
820 | reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; | |
1a635e48 | 821 | val = enable ? (vf->abs_vf_id | BIT(8)) : 0; |
0b55e27d YM |
822 | qed_wr(p_hwfn, p_ptt, reg_addr, val); |
823 | } | |
824 | } | |
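As a worked example of the {Valid, VF[7:0]} entry format described in the comment above (the VF id is chosen arbitrarily): enabling the zone for a VF with abs_vf_id 0x12 writes the value (0x12 | BIT(8)) = 0x112 to PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4, i.e. the valid bit in bit 8 and the VF id in bits 7:0, while disabling simply writes 0.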
825 | ||
dacd88d6 YM |
826 | static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, |
827 | struct qed_ptt *p_ptt, | |
828 | struct qed_vf_info *vf) | |
829 | { | |
830 | /* Reset vf in IGU - interrupts are still disabled */ | |
831 | qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); | |
832 | ||
833 | qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); | |
834 | ||
835 | /* Permission Table */ | |
836 | qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); | |
837 | } | |
838 | ||
1408cc1f YM |
839 | static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, |
840 | struct qed_ptt *p_ptt, | |
841 | struct qed_vf_info *vf, u16 num_rx_queues) | |
842 | { | |
09b6b147 MY |
843 | struct qed_igu_block *p_block; |
844 | struct cau_sb_entry sb_entry; | |
845 | int qid = 0; | |
1408cc1f YM |
846 | u32 val = 0; |
847 | ||
726fdbe9 MY |
848 | if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) |
849 | num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; | |
850 | p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; | |
1408cc1f YM |
851 | |
852 | SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); | |
853 | SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); | |
854 | SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); | |
855 | ||
09b6b147 MY |
856 | for (qid = 0; qid < num_rx_queues; qid++) { |
857 | p_block = qed_get_igu_free_sb(p_hwfn, false); | |
858 | vf->igu_sbs[qid] = p_block->igu_sb_id; | |
859 | p_block->status &= ~QED_IGU_STATUS_FREE; | |
860 | SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); | |
861 | ||
862 | qed_wr(p_hwfn, p_ptt, | |
863 | IGU_REG_MAPPING_MEMORY + | |
864 | sizeof(u32) * p_block->igu_sb_id, val); | |
865 | ||
866 | /* Configure igu sb in CAU which were marked valid */ | |
867 | qed_init_cau_sb_entry(p_hwfn, &sb_entry, | |
868 | p_hwfn->rel_pf_id, vf->abs_vf_id, 1); | |
869 | qed_dmae_host2grc(p_hwfn, p_ptt, | |
870 | (u64)(uintptr_t)&sb_entry, | |
871 | CAU_REG_SB_VAR_MEMORY + | |
872 | p_block->igu_sb_id * sizeof(u64), 2, 0); | |
1408cc1f YM |
873 | } |
874 | ||
875 | vf->num_sbs = (u8) num_rx_queues; | |
876 | ||
877 | return vf->num_sbs; | |
878 | } | |
879 | ||
0b55e27d YM |
880 | static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, |
881 | struct qed_ptt *p_ptt, | |
882 | struct qed_vf_info *vf) | |
883 | { | |
884 | struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; | |
885 | int idx, igu_id; | |
886 | u32 addr, val; | |
887 | ||
888 | /* Invalidate igu CAM lines and mark them as free */ | |
889 | for (idx = 0; idx < vf->num_sbs; idx++) { | |
890 | igu_id = vf->igu_sbs[idx]; | |
891 | addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; | |
892 | ||
893 | val = qed_rd(p_hwfn, p_ptt, addr); | |
894 | SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); | |
895 | qed_wr(p_hwfn, p_ptt, addr, val); | |
896 | ||
d749dd0d | 897 | p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; |
726fdbe9 | 898 | p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; |
0b55e27d YM |
899 | } |
900 | ||
901 | vf->num_sbs = 0; | |
902 | } | |
903 | ||
33b2fbd0 MY |
904 | static void qed_iov_set_link(struct qed_hwfn *p_hwfn, |
905 | u16 vfid, | |
906 | struct qed_mcp_link_params *params, | |
907 | struct qed_mcp_link_state *link, | |
908 | struct qed_mcp_link_capabilities *p_caps) | |
909 | { | |
910 | struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, | |
911 | vfid, | |
912 | false); | |
913 | struct qed_bulletin_content *p_bulletin; | |
914 | ||
915 | if (!p_vf) | |
916 | return; | |
917 | ||
918 | p_bulletin = p_vf->bulletin.p_virt; | |
919 | p_bulletin->req_autoneg = params->speed.autoneg; | |
920 | p_bulletin->req_adv_speed = params->speed.advertised_speeds; | |
921 | p_bulletin->req_forced_speed = params->speed.forced_speed; | |
922 | p_bulletin->req_autoneg_pause = params->pause.autoneg; | |
923 | p_bulletin->req_forced_rx = params->pause.forced_rx; | |
924 | p_bulletin->req_forced_tx = params->pause.forced_tx; | |
925 | p_bulletin->req_loopback = params->loopback_mode; | |
926 | ||
927 | p_bulletin->link_up = link->link_up; | |
928 | p_bulletin->speed = link->speed; | |
929 | p_bulletin->full_duplex = link->full_duplex; | |
930 | p_bulletin->autoneg = link->an; | |
931 | p_bulletin->autoneg_complete = link->an_complete; | |
932 | p_bulletin->parallel_detection = link->parallel_detection; | |
933 | p_bulletin->pfc_enabled = link->pfc_enabled; | |
934 | p_bulletin->partner_adv_speed = link->partner_adv_speed; | |
935 | p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; | |
936 | p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; | |
937 | p_bulletin->partner_adv_pause = link->partner_adv_pause; | |
938 | p_bulletin->sfp_tx_fault = link->sfp_tx_fault; | |
939 | ||
940 | p_bulletin->capability_speed = p_caps->speed_capabilities; | |
941 | } | |
942 | ||
1408cc1f YM |
943 | static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, |
944 | struct qed_ptt *p_ptt, | |
3da7a37a | 945 | struct qed_iov_vf_init_params *p_params) |
1408cc1f | 946 | { |
33b2fbd0 MY |
947 | struct qed_mcp_link_capabilities link_caps; |
948 | struct qed_mcp_link_params link_params; | |
949 | struct qed_mcp_link_state link_state; | |
1408cc1f YM |
950 | u8 num_of_vf_avaiable_chains = 0; |
951 | struct qed_vf_info *vf = NULL; | |
3da7a37a | 952 | u16 qid, num_irqs; |
1408cc1f YM |
953 | int rc = 0; |
954 | u32 cids; | |
955 | u8 i; | |
956 | ||
3da7a37a | 957 | vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); |
1408cc1f YM |
958 | if (!vf) { |
959 | DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); | |
960 | return -EINVAL; | |
961 | } | |
962 | ||
963 | if (vf->b_init) { | |
3da7a37a MY |
964 | DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", |
965 | p_params->rel_vf_id); | |
1408cc1f YM |
966 | return -EINVAL; |
967 | } | |
968 | ||
3da7a37a MY |
969 | /* Perform sanity checking on the requested queue_id */ |
970 | for (i = 0; i < p_params->num_queues; i++) { | |
971 | u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE); | |
972 | u16 max_vf_qzone = min_vf_qzone + | |
973 | FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; | |
974 | ||
975 | qid = p_params->req_rx_queue[i]; | |
976 | if (qid < min_vf_qzone || qid > max_vf_qzone) { | |
977 | DP_NOTICE(p_hwfn, | |
978 | "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n", | |
979 | qid, | |
980 | p_params->rel_vf_id, | |
981 | min_vf_qzone, max_vf_qzone); | |
982 | return -EINVAL; | |
983 | } | |
984 | ||
985 | qid = p_params->req_tx_queue[i]; | |
986 | if (qid > max_vf_qzone) { | |
987 | DP_NOTICE(p_hwfn, | |
988 | "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n", | |
989 | qid, p_params->rel_vf_id, max_vf_qzone); | |
990 | return -EINVAL; | |
991 | } | |
992 | ||
993 | /* If client *really* wants, Tx qid can be shared with PF */ | |
994 | if (qid < min_vf_qzone) | |
995 | DP_VERBOSE(p_hwfn, | |
996 | QED_MSG_IOV, | |
997 | "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n", | |
998 | p_params->rel_vf_id, qid, i); | |
999 | } | |
1000 | ||
1408cc1f YM |
1001 | /* Limit number of queues according to number of CIDs */ |
1002 | qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); | |
1003 | DP_VERBOSE(p_hwfn, | |
1004 | QED_MSG_IOV, | |
1005 | "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", | |
3da7a37a MY |
1006 | vf->relative_vf_id, p_params->num_queues, (u16)cids); |
1007 | num_irqs = min_t(u16, p_params->num_queues, ((u16)cids)); | |
1408cc1f YM |
1008 | |
1009 | num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, | |
1010 | p_ptt, | |
3da7a37a | 1011 | vf, num_irqs); |
1408cc1f YM |
1012 | if (!num_of_vf_avaiable_chains) { |
1013 | DP_ERR(p_hwfn, "no available igu sbs\n"); | |
1014 | return -ENOMEM; | |
1015 | } | |
1016 | ||
1017 | /* Choose queue number and index ranges */ | |
1018 | vf->num_rxqs = num_of_vf_avaiable_chains; | |
1019 | vf->num_txqs = num_of_vf_avaiable_chains; | |
1020 | ||
1021 | for (i = 0; i < vf->num_rxqs; i++) { | |
3da7a37a | 1022 | struct qed_vf_q_info *p_queue = &vf->vf_queues[i]; |
1408cc1f | 1023 | |
3da7a37a MY |
1024 | p_queue->fw_rx_qid = p_params->req_rx_queue[i]; |
1025 | p_queue->fw_tx_qid = p_params->req_tx_queue[i]; | |
1408cc1f YM |
1026 | |
1027 | /* CIDs are per-VF, so no problem having them 0-based. */ | |
3da7a37a | 1028 | p_queue->fw_cid = i; |
1408cc1f YM |
1029 | |
1030 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
3da7a37a MY |
1031 | "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n", |
1032 | vf->relative_vf_id, | |
1033 | i, vf->igu_sbs[i], | |
1034 | p_queue->fw_rx_qid, | |
1035 | p_queue->fw_tx_qid, p_queue->fw_cid); | |
1408cc1f | 1036 | } |
3da7a37a | 1037 | |
33b2fbd0 MY |
1038 | /* Update the link configuration in bulletin */ |
1039 | memcpy(&link_params, qed_mcp_get_link_params(p_hwfn), | |
1040 | sizeof(link_params)); | |
1041 | memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state)); | |
1042 | memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn), | |
1043 | sizeof(link_caps)); | |
1044 | qed_iov_set_link(p_hwfn, p_params->rel_vf_id, | |
1045 | &link_params, &link_state, &link_caps); | |
1046 | ||
1408cc1f YM |
1047 | rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); |
1048 | if (!rc) { | |
1049 | vf->b_init = true; | |
1050 | ||
1051 | if (IS_LEAD_HWFN(p_hwfn)) | |
1052 | p_hwfn->cdev->p_iov_info->num_vfs++; | |
1053 | } | |
1054 | ||
1055 | return rc; | |
1056 | } | |
1057 | ||
0b55e27d YM |
1058 | static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, |
1059 | struct qed_ptt *p_ptt, u16 rel_vf_id) | |
1060 | { | |
079d20a6 MC |
1061 | struct qed_mcp_link_capabilities caps; |
1062 | struct qed_mcp_link_params params; | |
1063 | struct qed_mcp_link_state link; | |
0b55e27d | 1064 | struct qed_vf_info *vf = NULL; |
0b55e27d YM |
1065 | |
1066 | vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); | |
1067 | if (!vf) { | |
1068 | DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); | |
1069 | return -EINVAL; | |
1070 | } | |
1071 | ||
36558c3d YM |
1072 | if (vf->bulletin.p_virt) |
1073 | memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); | |
1074 | ||
1075 | memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); | |
1076 | ||
079d20a6 MC |
1077 | /* Get the link configuration back in bulletin so |
1078 | * that when VFs are re-enabled they get the actual | |
1079 | * link configuration. | |
1080 | */ | |
1081 | memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params)); | |
1082 | memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); | |
1083 | memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); | |
1084 | qed_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps); | |
1085 | ||
1fe614d1 YM |
1086 | /* Forget the VF's acquisition message */ |
1087 | memset(&vf->acquire, 0, sizeof(vf->acquire)); | |
0b55e27d YM |
1088 | |
1089 | /* Disabling interrupts and resetting the permission table were done during
1090 | * vf-close; however, we could get here without going through vf_close
1091 | */ | |
1092 | /* Disable Interrupts for VF */ | |
1093 | qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); | |
1094 | ||
1095 | /* Reset Permission table */ | |
1096 | qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); | |
1097 | ||
1098 | vf->num_rxqs = 0; | |
1099 | vf->num_txqs = 0; | |
1100 | qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); | |
1101 | ||
1102 | if (vf->b_init) { | |
1103 | vf->b_init = false; | |
1104 | ||
1105 | if (IS_LEAD_HWFN(p_hwfn)) | |
1106 | p_hwfn->cdev->p_iov_info->num_vfs--; | |
1107 | } | |
1108 | ||
1109 | return 0; | |
1110 | } | |
1111 | ||
37bff2b9 YM |
1112 | static bool qed_iov_tlv_supported(u16 tlvtype) |
1113 | { | |
1114 | return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; | |
1115 | } | |
1116 | ||
1117 | /* place a given tlv on the tlv buffer, continuing current tlv list */ | |
1118 | void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) | |
1119 | { | |
1120 | struct channel_tlv *tl = (struct channel_tlv *)*offset; | |
1121 | ||
1122 | tl->type = type; | |
1123 | tl->length = length; | |
1124 | ||
1125 | /* Offset should keep pointing to next TLV (the end of the last) */ | |
1126 | *offset += length; | |
1127 | ||
1128 | /* Return a pointer to the start of the added tlv */ | |
1129 | return *offset - length; | |
1130 | } | |
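Replies are built by chaining qed_add_tlv() calls on the same offset cursor: each call advances *offset past the TLV it just placed and returns a pointer to that TLV so its payload can be filled in, and the chain is closed with CHANNEL_TLV_LIST_END. A condensed sketch of that pattern (the helper name is made up; the body is lifted from qed_iov_prepare_resp() further below):

        static void example_build_reply(struct qed_hwfn *p_hwfn,
                                        struct qed_iov_vf_mbx *mbx,
                                        u16 type, u16 length)
        {
                mbx->offset = (u8 *)mbx->reply_virt;

                /* One typed TLV followed by the mandatory list terminator */
                qed_add_tlv(p_hwfn, &mbx->offset, type, length);
                qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                            sizeof(struct channel_list_end_tlv));
        }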
1131 | ||
1132 | /* list the types and lengths of the tlvs on the buffer */ | |
1133 | void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) | |
1134 | { | |
1135 | u16 i = 1, total_length = 0; | |
1136 | struct channel_tlv *tlv; | |
1137 | ||
1138 | do { | |
1139 | tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); | |
1140 | ||
1141 | /* output tlv */ | |
1142 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
1143 | "TLV number %d: type %d, length %d\n", | |
1144 | i, tlv->type, tlv->length); | |
1145 | ||
1146 | if (tlv->type == CHANNEL_TLV_LIST_END) | |
1147 | return; | |
1148 | ||
1149 | /* Validate entry - protect against malicious VFs */ | |
1150 | if (!tlv->length) { | |
1151 | DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); | |
1152 | return; | |
1153 | } | |
1154 | ||
1155 | total_length += tlv->length; | |
1156 | ||
1157 | if (total_length >= sizeof(struct tlv_buffer_size)) { | |
1158 | DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n"); | |
1159 | return; | |
1160 | } | |
1161 | ||
1162 | i++; | |
1163 | } while (1); | |
1164 | } | |
1165 | ||
1166 | static void qed_iov_send_response(struct qed_hwfn *p_hwfn, | |
1167 | struct qed_ptt *p_ptt, | |
1168 | struct qed_vf_info *p_vf, | |
1169 | u16 length, u8 status) | |
1170 | { | |
1171 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; | |
1172 | struct qed_dmae_params params; | |
1173 | u8 eng_vf_id; | |
1174 | ||
1175 | mbx->reply_virt->default_resp.hdr.status = status; | |
1176 | ||
1177 | qed_dp_tlv_list(p_hwfn, mbx->reply_virt); | |
1178 | ||
1179 | eng_vf_id = p_vf->abs_vf_id; | |
1180 | ||
1181 | memset(¶ms, 0, sizeof(struct qed_dmae_params)); | |
1182 | params.flags = QED_DMAE_FLAG_VF_DST; | |
1183 | params.dst_vfid = eng_vf_id; | |
1184 | ||
1185 | qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), | |
1186 | mbx->req_virt->first_tlv.reply_address + | |
1187 | sizeof(u64), | |
1188 | (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, | |
1189 | ¶ms); | |
1190 | ||
d9194081 MY |
1191 | /* Once PF copies the rc to the VF, the latter can continue |
1192 | * and send an additional message. So we have to make sure the | |
1193 | * channel would be re-set to ready prior to that. | |
1194 | */ | |
37bff2b9 YM |
1195 | REG_WR(p_hwfn, |
1196 | GTT_BAR0_MAP_REG_USDM_RAM + | |
1197 | USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); | |
d9194081 MY |
1198 | |
1199 | qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, | |
1200 | mbx->req_virt->first_tlv.reply_address, | |
1201 | sizeof(u64) / 4, ¶ms); | |
37bff2b9 YM |
1202 | } |
1203 | ||
dacd88d6 YM |
1204 | static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, |
1205 | enum qed_iov_vport_update_flag flag) | |
1206 | { | |
1207 | switch (flag) { | |
1208 | case QED_IOV_VP_UPDATE_ACTIVATE: | |
1209 | return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; | |
17b235c1 YM |
1210 | case QED_IOV_VP_UPDATE_VLAN_STRIP: |
1211 | return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; | |
1212 | case QED_IOV_VP_UPDATE_TX_SWITCH: | |
1213 | return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; | |
dacd88d6 YM |
1214 | case QED_IOV_VP_UPDATE_MCAST: |
1215 | return CHANNEL_TLV_VPORT_UPDATE_MCAST; | |
1216 | case QED_IOV_VP_UPDATE_ACCEPT_PARAM: | |
1217 | return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; | |
1218 | case QED_IOV_VP_UPDATE_RSS: | |
1219 | return CHANNEL_TLV_VPORT_UPDATE_RSS; | |
17b235c1 YM |
1220 | case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: |
1221 | return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; | |
1222 | case QED_IOV_VP_UPDATE_SGE_TPA: | |
1223 | return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; | |
dacd88d6 YM |
1224 | default: |
1225 | return 0; | |
1226 | } | |
1227 | } | |
1228 | ||
1229 | static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, | |
1230 | struct qed_vf_info *p_vf, | |
1231 | struct qed_iov_vf_mbx *p_mbx, | |
1232 | u8 status, | |
1233 | u16 tlvs_mask, u16 tlvs_accepted) | |
1234 | { | |
1235 | struct pfvf_def_resp_tlv *resp; | |
1236 | u16 size, total_len, i; | |
1237 | ||
1238 | memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); | |
1239 | p_mbx->offset = (u8 *)p_mbx->reply_virt; | |
1240 | size = sizeof(struct pfvf_def_resp_tlv); | |
1241 | total_len = size; | |
1242 | ||
1243 | qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); | |
1244 | ||
1245 | /* Prepare response for all extended tlvs if they are found by PF */ | |
1246 | for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { | |
1a635e48 | 1247 | if (!(tlvs_mask & BIT(i))) |
dacd88d6 YM |
1248 | continue; |
1249 | ||
1250 | resp = qed_add_tlv(p_hwfn, &p_mbx->offset, | |
1251 | qed_iov_vport_to_tlv(p_hwfn, i), size); | |
1252 | ||
1a635e48 | 1253 | if (tlvs_accepted & BIT(i)) |
dacd88d6 YM |
1254 | resp->hdr.status = status; |
1255 | else | |
1256 | resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; | |
1257 | ||
1258 | DP_VERBOSE(p_hwfn, | |
1259 | QED_MSG_IOV, | |
1260 | "VF[%d] - vport_update response: TLV %d, status %02x\n", | |
1261 | p_vf->relative_vf_id, | |
1262 | qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); | |
1263 | ||
1264 | total_len += size; | |
1265 | } | |
1266 | ||
1267 | qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, | |
1268 | sizeof(struct channel_list_end_tlv)); | |
1269 | ||
1270 | return total_len; | |
1271 | } | |
1272 | ||
37bff2b9 YM |
1273 | static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, |
1274 | struct qed_ptt *p_ptt, | |
1275 | struct qed_vf_info *vf_info, | |
1276 | u16 type, u16 length, u8 status) | |
1277 | { | |
1278 | struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; | |
1279 | ||
1280 | mbx->offset = (u8 *)mbx->reply_virt; | |
1281 | ||
1282 | qed_add_tlv(p_hwfn, &mbx->offset, type, length); | |
1283 | qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, | |
1284 | sizeof(struct channel_list_end_tlv)); | |
1285 | ||
1286 | qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); | |
1287 | } | |
1288 | ||
ba56947a BX |
1289 | static struct |
1290 | qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, | |
1291 | u16 relative_vf_id, | |
1292 | bool b_enabled_only) | |
0b55e27d YM |
1293 | { |
1294 | struct qed_vf_info *vf = NULL; | |
1295 | ||
1296 | vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); | |
1297 | if (!vf) | |
1298 | return NULL; | |
1299 | ||
1300 | return &vf->p_vf_info; | |
1301 | } | |
1302 | ||
ba56947a | 1303 | static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) |
0b55e27d YM |
1304 | { |
1305 | struct qed_public_vf_info *vf_info; | |
1306 | ||
1307 | vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); | |
1308 | ||
1309 | if (!vf_info) | |
1310 | return; | |
1311 | ||
1312 | /* Clear the VF mac */ | |
0ee28e31 | 1313 | eth_zero_addr(vf_info->mac); |
f990c82c MY |
1314 | |
1315 | vf_info->rx_accept_mode = 0; | |
1316 | vf_info->tx_accept_mode = 0; | |
0b55e27d YM |
1317 | } |
1318 | ||
1319 | static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, | |
1320 | struct qed_vf_info *p_vf) | |
1321 | { | |
1322 | u32 i; | |
1323 | ||
1324 | p_vf->vf_bulletin = 0; | |
dacd88d6 | 1325 | p_vf->vport_instance = 0; |
08feecd7 | 1326 | p_vf->configured_features = 0; |
0b55e27d YM |
1327 | |
1328 | /* If VF previously requested less resources, go back to default */ | |
1329 | p_vf->num_rxqs = p_vf->num_sbs; | |
1330 | p_vf->num_txqs = p_vf->num_sbs; | |
1331 | ||
dacd88d6 YM |
1332 | p_vf->num_active_rxqs = 0; |
1333 | ||
3da7a37a MY |
1334 | for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { |
1335 | struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i]; | |
1336 | ||
1337 | if (p_queue->p_rx_cid) { | |
1338 | qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); | |
1339 | p_queue->p_rx_cid = NULL; | |
1340 | } | |
1341 | ||
1342 | if (p_queue->p_tx_cid) { | |
1343 | qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); | |
1344 | p_queue->p_tx_cid = NULL; | |
1345 | } | |
1346 | } | |
0b55e27d | 1347 | |
08feecd7 | 1348 | memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); |
1fe614d1 | 1349 | memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); |
0b55e27d YM |
1350 | qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); |
1351 | } | |
1352 | ||
1cf2b1a9 YM |
1353 | static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, |
1354 | struct qed_ptt *p_ptt, | |
1355 | struct qed_vf_info *p_vf, | |
1356 | struct vf_pf_resc_request *p_req, | |
1357 | struct pf_vf_resc *p_resp) | |
1358 | { | |
1359 | int i; | |
1360 | ||
1361 | /* Queue related information */ | |
1362 | p_resp->num_rxqs = p_vf->num_rxqs; | |
1363 | p_resp->num_txqs = p_vf->num_txqs; | |
1364 | p_resp->num_sbs = p_vf->num_sbs; | |
1365 | ||
1366 | for (i = 0; i < p_resp->num_sbs; i++) { | |
1367 | p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; | |
1368 | p_resp->hw_sbs[i].sb_qid = 0; | |
1369 | } | |
1370 | ||
1371 | /* These fields are filled for backward compatibility. | |
1372 | * Unused by modern vfs. | |
1373 | */ | |
1374 | for (i = 0; i < p_resp->num_rxqs; i++) { | |
1375 | qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, | |
1376 | (u16 *)&p_resp->hw_qid[i]); | |
1377 | p_resp->cid[i] = p_vf->vf_queues[i].fw_cid; | |
1378 | } | |
1379 | ||
1380 | /* Filter related information */ | |
1381 | p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, | |
1382 | p_req->num_mac_filters); | |
1383 | p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, | |
1384 | p_req->num_vlan_filters); | |
1385 | ||
1386 | /* This isn't really needed/enforced, but some legacy VFs might depend | |
1387 | * on the correct filling of this field. | |
1388 | */ | |
1389 | p_resp->num_mc_filters = QED_MAX_MC_ADDRS; | |
1390 | ||
1391 | /* Validate sufficient resources for VF */ | |
1392 | if (p_resp->num_rxqs < p_req->num_rxqs || | |
1393 | p_resp->num_txqs < p_req->num_txqs || | |
1394 | p_resp->num_sbs < p_req->num_sbs || | |
1395 | p_resp->num_mac_filters < p_req->num_mac_filters || | |
1396 | p_resp->num_vlan_filters < p_req->num_vlan_filters || | |
1397 | p_resp->num_mc_filters < p_req->num_mc_filters) { | |
1398 | DP_VERBOSE(p_hwfn, | |
1399 | QED_MSG_IOV, | |
1400 | "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n", | |
1401 | p_vf->abs_vf_id, | |
1402 | p_req->num_rxqs, | |
1403 | p_resp->num_rxqs, | |
1404 | p_req->num_txqs,
1405 | p_resp->num_txqs, | |
1406 | p_req->num_sbs, | |
1407 | p_resp->num_sbs, | |
1408 | p_req->num_mac_filters, | |
1409 | p_resp->num_mac_filters, | |
1410 | p_req->num_vlan_filters, | |
1411 | p_resp->num_vlan_filters, | |
1412 | p_req->num_mc_filters, p_resp->num_mc_filters); | |
a044df83 YM |
1413 | |
1414 | /* Some legacy OSes are incapable of correctly handling this | |
1415 | * failure. | |
1416 | */ | |
1417 | if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == | |
1418 | ETH_HSI_VER_NO_PKT_LEN_TUNN) && | |
1419 | (p_vf->acquire.vfdev_info.os_type == | |
1420 | VFPF_ACQUIRE_OS_WINDOWS)) | |
1421 | return PFVF_STATUS_SUCCESS; | |
1422 | ||
1cf2b1a9 YM |
1423 | return PFVF_STATUS_NO_RESOURCE; |
1424 | } | |
1425 | ||
1426 | return PFVF_STATUS_SUCCESS; | |
1427 | } | |
1428 | ||
1429 | static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, | |
1430 | struct pfvf_stats_info *p_stats) | |
1431 | { | |
1432 | p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + | |
1433 | offsetof(struct mstorm_vf_zone, | |
1434 | non_trigger.eth_queue_stat); | |
1435 | p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); | |
1436 | p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + | |
1437 | offsetof(struct ustorm_vf_zone, | |
1438 | non_trigger.eth_queue_stat); | |
1439 | p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); | |
1440 | p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + | |
1441 | offsetof(struct pstorm_vf_zone, | |
1442 | non_trigger.eth_queue_stat); | |
1443 | p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); | |
1444 | p_stats->tstats.address = 0; | |
1445 | p_stats->tstats.len = 0; | |
1446 | } | |
1447 | ||
1408cc1f YM |
1448 | static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, |
1449 | struct qed_ptt *p_ptt, | |
1450 | struct qed_vf_info *vf) | |
37bff2b9 | 1451 | { |
1408cc1f YM |
1452 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
1453 | struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; | |
1454 | struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; | |
1455 | struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; | |
1cf2b1a9 | 1456 | u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; |
1408cc1f | 1457 | struct pf_vf_resc *resc = &resp->resc; |
1fe614d1 YM |
1458 | int rc; |
1459 | ||
1460 | memset(resp, 0, sizeof(*resp)); | |
1408cc1f | 1461 | |
05fafbfb YM |
1462 | /* Write the PF version so that VF would know which version |
1463 | * is supported - might be later overriden. This guarantees that | |
1464 | * VF could recognize legacy PF based on lack of versions in reply. | |
1465 | */ | |
1466 | pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; | |
1467 | pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; | |
1468 | ||
a044df83 YM |
1469 | if (vf->state != VF_FREE && vf->state != VF_STOPPED) { |
1470 | DP_VERBOSE(p_hwfn, | |
1471 | QED_MSG_IOV, | |
1472 | "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", | |
1473 | vf->abs_vf_id, vf->state); | |
1474 | goto out; | |
1475 | } | |
1476 | ||
1408cc1f | 1477 | /* Validate FW compatibility */ |
1fe614d1 | 1478 | if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { |
a044df83 YM |
1479 | if (req->vfdev_info.capabilities & |
1480 | VFPF_ACQUIRE_CAP_PRE_FP_HSI) { | |
1481 | struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; | |
1fe614d1 | 1482 | |
a044df83 YM |
1483 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
1484 | "VF[%d] is pre-fastpath HSI\n", | |
1485 | vf->abs_vf_id); | |
1486 | p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; | |
1487 | p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; | |
1488 | } else { | |
1489 | DP_INFO(p_hwfn, | |
1490 | "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", | |
1491 | vf->abs_vf_id, | |
1492 | req->vfdev_info.eth_fp_hsi_major, | |
1493 | req->vfdev_info.eth_fp_hsi_minor, | |
1494 | ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); | |
1495 | ||
1496 | goto out; | |
1497 | } | |
1408cc1f YM |
1498 | } |
1499 | ||
1500 | /* On 100g PFs, prevent old VFs from loading */ | |
1501 | if ((p_hwfn->cdev->num_hwfns > 1) && | |
1502 | !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { | |
1503 | DP_INFO(p_hwfn, | |
1504 | "VF[%d] is running an old driver that doesn't support 100g\n", | |
1505 | vf->abs_vf_id); | |
1408cc1f YM |
1506 | goto out; |
1507 | } | |
1508 | ||
1fe614d1 YM |
1509 | /* Store the acquire message */ |
1510 | memcpy(&vf->acquire, req, sizeof(vf->acquire)); | |
1408cc1f | 1511 | |
1408cc1f | 1512 | vf->opaque_fid = req->vfdev_info.opaque_fid; |
1408cc1f YM |
1513 | |
1514 | vf->vf_bulletin = req->bulletin_addr; | |
1515 | vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? | |
1516 | vf->bulletin.size : req->bulletin_size; | |
1517 | ||
1518 | /* fill in pfdev info */ | |
1519 | pfdev_info->chip_num = p_hwfn->cdev->chip_num; | |
1520 | pfdev_info->db_size = 0; | |
1521 | pfdev_info->indices_per_sb = PIS_PER_SB; | |
1522 | ||
1523 | pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | | |
1524 | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; | |
1525 | if (p_hwfn->cdev->num_hwfns > 1) | |
1526 | pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; | |
1527 | ||
1cf2b1a9 | 1528 | qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); |
1408cc1f YM |
1529 | |
1530 | memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); | |
1531 | ||
1532 | pfdev_info->fw_major = FW_MAJOR_VERSION; | |
1533 | pfdev_info->fw_minor = FW_MINOR_VERSION; | |
1534 | pfdev_info->fw_rev = FW_REVISION_VERSION; | |
1535 | pfdev_info->fw_eng = FW_ENGINEERING_VERSION; | |
a044df83 YM |
1536 | |
1537 | /* Incorrect when legacy, but doesn't matter as legacy isn't reading | |
1538 | * this field. | |
1539 | */ | |
1a635e48 | 1540 | pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, |
1fe614d1 | 1541 | req->vfdev_info.eth_fp_hsi_minor); |
1408cc1f YM |
1542 | pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; |
1543 | qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); | |
1544 | ||
1545 | pfdev_info->dev_type = p_hwfn->cdev->type; | |
1546 | pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; | |
1547 | ||
1cf2b1a9 YM |
1548 | /* Fill resources available to VF; Make sure there are enough to |
1549 | * satisfy the VF's request. | |
1408cc1f | 1550 | */ |
1cf2b1a9 YM |
1551 | vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, |
1552 | &req->resc_request, resc); | |
1553 | if (vfpf_status != PFVF_STATUS_SUCCESS) | |
1554 | goto out; | |
1408cc1f | 1555 | |
1fe614d1 YM |
1556 | /* Start the VF in FW */ |
1557 | rc = qed_sp_vf_start(p_hwfn, vf); | |
1558 | if (rc) { | |
1559 | DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); | |
1560 | vfpf_status = PFVF_STATUS_FAILURE; | |
1561 | goto out; | |
1562 | } | |
1563 | ||
1408cc1f YM |
1564 | /* Fill agreed size of bulletin board in response */ |
1565 | resp->bulletin_size = vf->bulletin.size; | |
36558c3d | 1566 | qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); |
1408cc1f YM |
1567 | |
1568 | DP_VERBOSE(p_hwfn, | |
1569 | QED_MSG_IOV, | |
1570 | "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" | |
1571 | "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", | |
1572 | vf->abs_vf_id, | |
1573 | resp->pfdev_info.chip_num, | |
1574 | resp->pfdev_info.db_size, | |
1575 | resp->pfdev_info.indices_per_sb, | |
1576 | resp->pfdev_info.capabilities, | |
1577 | resc->num_rxqs, | |
1578 | resc->num_txqs, | |
1579 | resc->num_sbs, | |
1580 | resc->num_mac_filters, | |
1581 | resc->num_vlan_filters); | |
1582 | vf->state = VF_ACQUIRED; | |
1583 | ||
1584 | /* Prepare Response */ | |
1585 | out: | |
1586 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, | |
1587 | sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); | |
37bff2b9 YM |
1588 | } |
1589 | ||
6ddc7608 YM |
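| /* Toggle anti-spoofing on the VF's vport via a vport-update ramrod; the | 
| * cached spoof_chk and req_spoofchk_val fields are updated only on success. | 
| */ | 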
1590 | static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, |
1591 | struct qed_vf_info *p_vf, bool val) | |
1592 | { | |
1593 | struct qed_sp_vport_update_params params; | |
1594 | int rc; | |
1595 | ||
1596 | if (val == p_vf->spoof_chk) { | |
1597 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
1598 | "Spoofchk value[%d] is already configured\n", val); | |
1599 | return 0; | |
1600 | } | |
1601 | ||
1602 | memset(¶ms, 0, sizeof(struct qed_sp_vport_update_params)); | |
1603 | params.opaque_fid = p_vf->opaque_fid; | |
1604 | params.vport_id = p_vf->vport_id; | |
1605 | params.update_anti_spoofing_en_flg = 1; | |
1606 | params.anti_spoofing_en = val; | |
1607 | ||
1608 | rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); | |
cb1fa088 | 1609 | if (!rc) { |
6ddc7608 YM |
1610 | p_vf->spoof_chk = val; |
1611 | p_vf->req_spoofchk_val = p_vf->spoof_chk; | |
1612 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
1613 | "Spoofchk val[%d] configured\n", val); | |
1614 | } else { | |
1615 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
1616 | "Spoofchk configuration[val:%d] failed for VF[%d]\n", | |
1617 | val, p_vf->relative_vf_id); | |
1618 | } | |
1619 | ||
1620 | return rc; | |
1621 | } | |
1622 | ||
08feecd7 YM |
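| /* Re-add every VLAN recorded in the VF's shadow configuration as an | 
| * Rx/Tx filter on its vport; used when a forced VLAN is removed and the | 
| * VF's own filters need to be restored. | 
| */ | 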
1623 | static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, |
1624 | struct qed_vf_info *p_vf) | |
1625 | { | |
1626 | struct qed_filter_ucast filter; | |
1627 | int rc = 0; | |
1628 | int i; | |
1629 | ||
1630 | memset(&filter, 0, sizeof(filter)); | |
1631 | filter.is_rx_filter = 1; | |
1632 | filter.is_tx_filter = 1; | |
1633 | filter.vport_to_add_to = p_vf->vport_id; | |
1634 | filter.opcode = QED_FILTER_ADD; | |
1635 | ||
1636 | /* Reconfigure vlans */ | |
1637 | for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { | |
1638 | if (!p_vf->shadow_config.vlans[i].used) | |
1639 | continue; | |
1640 | ||
1641 | filter.type = QED_FILTER_VLAN; | |
1642 | filter.vlan = p_vf->shadow_config.vlans[i].vid; | |
1a635e48 | 1643 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
08feecd7 YM |
1644 | "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", |
1645 | filter.vlan, p_vf->relative_vf_id); | |
1a635e48 YM |
1646 | rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, |
1647 | &filter, QED_SPQ_MODE_CB, NULL); | |
08feecd7 YM |
1648 | if (rc) { |
1649 | DP_NOTICE(p_hwfn, | |
1650 | "Failed to configure VLAN [%04x] to VF [%04x]\n", | |
1651 | filter.vlan, p_vf->relative_vf_id); | |
1652 | break; | |
1653 | } | |
1654 | } | |
1655 | ||
1656 | return rc; | |
1657 | } | |
1658 | ||
1659 | static int | |
1660 | qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, | |
1661 | struct qed_vf_info *p_vf, u64 events) | |
1662 | { | |
1663 | int rc = 0; | |
1664 | ||
1a635e48 | 1665 | if ((events & BIT(VLAN_ADDR_FORCED)) && |
08feecd7 YM |
1666 | !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) |
1667 | rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); | |
1668 | ||
1669 | return rc; | |
1670 | } | |
1671 | ||
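| /* Apply the bulletin-board forced MAC/VLAN settings to the VF's vport: | 
| * replace the unicast MAC filter, program the forced VLAN together with | 
| * the default-vlan and silent-stripping vport settings, refresh all | 
| * active Rx queues, and re-apply the shadow config once a force is lifted. | 
| */ | 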
1672 | static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, | |
1673 | struct qed_vf_info *p_vf, u64 events) | |
1674 | { | |
1675 | int rc = 0; | |
1676 | struct qed_filter_ucast filter; | |
1677 | ||
1678 | if (!p_vf->vport_instance) | |
1679 | return -EINVAL; | |
1680 | ||
1a635e48 | 1681 | if (events & BIT(MAC_ADDR_FORCED)) { |
eff16960 YM |
1682 | /* Since there's no way [currently] of removing the MAC, |
1683 | * we can always assume this means we need to force it. | |
1684 | */ | |
1685 | memset(&filter, 0, sizeof(filter)); | |
1686 | filter.type = QED_FILTER_MAC; | |
1687 | filter.opcode = QED_FILTER_REPLACE; | |
1688 | filter.is_rx_filter = 1; | |
1689 | filter.is_tx_filter = 1; | |
1690 | filter.vport_to_add_to = p_vf->vport_id; | |
1691 | ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); | |
1692 | ||
1693 | rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, | |
1694 | &filter, QED_SPQ_MODE_CB, NULL); | |
1695 | if (rc) { | |
1696 | DP_NOTICE(p_hwfn, | |
1697 | "PF failed to configure MAC for VF\n"); | |
1698 | return rc; | |
1699 | } | |
1700 | ||
1701 | p_vf->configured_features |= 1 << MAC_ADDR_FORCED; | |
1702 | } | |
1703 | ||
1a635e48 | 1704 | if (events & BIT(VLAN_ADDR_FORCED)) { |
08feecd7 YM |
1705 | struct qed_sp_vport_update_params vport_update; |
1706 | u8 removal; | |
1707 | int i; | |
1708 | ||
1709 | memset(&filter, 0, sizeof(filter)); | |
1710 | filter.type = QED_FILTER_VLAN; | |
1711 | filter.is_rx_filter = 1; | |
1712 | filter.is_tx_filter = 1; | |
1713 | filter.vport_to_add_to = p_vf->vport_id; | |
1714 | filter.vlan = p_vf->bulletin.p_virt->pvid; | |
1715 | filter.opcode = filter.vlan ? QED_FILTER_REPLACE : | |
1716 | QED_FILTER_FLUSH; | |
1717 | ||
1718 | /* Send the ramrod */ | |
1719 | rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, | |
1720 | &filter, QED_SPQ_MODE_CB, NULL); | |
1721 | if (rc) { | |
1722 | DP_NOTICE(p_hwfn, | |
1723 | "PF failed to configure VLAN for VF\n"); | |
1724 | return rc; | |
1725 | } | |
1726 | ||
1727 | /* Update the default-vlan & silent vlan stripping */ | |
1728 | memset(&vport_update, 0, sizeof(vport_update)); | |
1729 | vport_update.opaque_fid = p_vf->opaque_fid; | |
1730 | vport_update.vport_id = p_vf->vport_id; | |
1731 | vport_update.update_default_vlan_enable_flg = 1; | |
1732 | vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; | |
1733 | vport_update.update_default_vlan_flg = 1; | |
1734 | vport_update.default_vlan = filter.vlan; | |
1735 | ||
1736 | vport_update.update_inner_vlan_removal_flg = 1; | |
1737 | removal = filter.vlan ? 1 | |
1738 | : p_vf->shadow_config.inner_vlan_removal; | |
1739 | vport_update.inner_vlan_removal_flg = removal; | |
1740 | vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0; | |
1741 | rc = qed_sp_vport_update(p_hwfn, | |
1742 | &vport_update, | |
1743 | QED_SPQ_MODE_EBLOCK, NULL); | |
1744 | if (rc) { | |
1745 | DP_NOTICE(p_hwfn, | |
1746 | "PF failed to configure VF vport for vlan\n"); | |
1747 | return rc; | |
1748 | } | |
1749 | ||
1750 | /* Update all the Rx queues */ | |
1751 | for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { | |
3da7a37a | 1752 | struct qed_queue_cid *p_cid; |
08feecd7 | 1753 | |
3da7a37a MY |
1754 | p_cid = p_vf->vf_queues[i].p_rx_cid; |
1755 | if (!p_cid) | |
08feecd7 YM |
1756 | continue; |
1757 | ||
3da7a37a MY |
1758 | rc = qed_sp_eth_rx_queues_update(p_hwfn, |
1759 | (void **)&p_cid, | |
08feecd7 YM |
1760 | 1, 0, 1, |
1761 | QED_SPQ_MODE_EBLOCK, | |
1762 | NULL); | |
1763 | if (rc) { | |
1764 | DP_NOTICE(p_hwfn, | |
1765 | "Failed to send Rx update for queue[0x%04x]\n", | 
3da7a37a | 1766 | p_cid->rel.queue_id); |
08feecd7 YM |
1767 | return rc; |
1768 | } | |
1769 | } | |
1770 | ||
1771 | if (filter.vlan) | |
1772 | p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; | |
1773 | else | |
1a635e48 | 1774 | p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); |
08feecd7 YM |
1775 | } |
1776 | ||
1777 | /* If forced features are terminated, we need to re-apply the shadow | 
1778 | * configuration. | 
1779 | */ | |
1780 | if (events) | |
1781 | qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); | |
1782 | ||
1783 | return rc; | |
1784 | } | |
1785 | ||
dacd88d6 YM |
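| /* VPORT_START mailbox handler: enables VF traffic, programs the CAU for | 
| * every status block the VF supplied, honors a hypervisor-forced | 
| * untagged default if present, starts the vport in FW and then re-applies | 
| * forced features and spoof-checking. | 
| */ | 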
1786 | static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, |
1787 | struct qed_ptt *p_ptt, | |
1788 | struct qed_vf_info *vf) | |
1789 | { | |
1790 | struct qed_sp_vport_start_params params = { 0 }; | |
1791 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1792 | struct vfpf_vport_start_tlv *start; | |
1793 | u8 status = PFVF_STATUS_SUCCESS; | |
1794 | struct qed_vf_info *vf_info; | |
08feecd7 | 1795 | u64 *p_bitmap; |
dacd88d6 YM |
1796 | int sb_id; |
1797 | int rc; | |
1798 | ||
1799 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); | |
1800 | if (!vf_info) { | |
1801 | DP_NOTICE(p_hwfn->cdev, | |
1802 | "Failed to get VF info, invalid vfid [%d]\n", | |
1803 | vf->relative_vf_id); | |
1804 | return; | |
1805 | } | |
1806 | ||
1807 | vf->state = VF_ENABLED; | |
1808 | start = &mbx->req_virt->start_vport; | |
1809 | ||
b801b159 MY |
1810 | qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); |
1811 | ||
dacd88d6 YM |
1812 | /* Initialize Status block in CAU */ |
1813 | for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { | |
1814 | if (!start->sb_addr[sb_id]) { | |
1815 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
1816 | "VF[%d] did not fill the address of SB %d\n", | |
1817 | vf->relative_vf_id, sb_id); | |
1818 | break; | |
1819 | } | |
1820 | ||
1821 | qed_int_cau_conf_sb(p_hwfn, p_ptt, | |
1822 | start->sb_addr[sb_id], | |
1a635e48 | 1823 | vf->igu_sbs[sb_id], vf->abs_vf_id, 1); |
dacd88d6 | 1824 | } |
dacd88d6 YM |
1825 | |
1826 | vf->mtu = start->mtu; | |
08feecd7 YM |
1827 | vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; |
1828 | ||
1829 | /* Take into consideration configuration forced by the hypervisor; | 
1830 | * if none is configured, use the supplied VF values [for old | 
1831 | * VFs this is still fine, since they passed '0' as padding]. | 
1832 | */ | |
1833 | p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; | |
1a635e48 | 1834 | if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { |
08feecd7 YM |
1835 | u8 vf_req = start->only_untagged; |
1836 | ||
1837 | vf_info->bulletin.p_virt->default_only_untagged = vf_req; | |
1838 | *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; | |
1839 | } | |
dacd88d6 YM |
1840 | |
1841 | params.tpa_mode = start->tpa_mode; | |
1842 | params.remove_inner_vlan = start->inner_vlan_removal; | |
831bfb0e | 1843 | params.tx_switching = true; |
dacd88d6 | 1844 | |
08feecd7 | 1845 | params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; |
dacd88d6 YM |
1846 | params.drop_ttl0 = false; |
1847 | params.concrete_fid = vf->concrete_fid; | |
1848 | params.opaque_fid = vf->opaque_fid; | |
1849 | params.vport_id = vf->vport_id; | |
1850 | params.max_buffers_per_cqe = start->max_buffers_per_cqe; | |
1851 | params.mtu = vf->mtu; | |
11a85d75 | 1852 | params.check_mac = true; |
dacd88d6 YM |
1853 | |
1854 | rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); | |
1a635e48 | 1855 | if (rc) { |
dacd88d6 YM |
1856 | DP_ERR(p_hwfn, |
1857 | "qed_iov_vf_mbx_start_vport returned error %d\n", rc); | |
1858 | status = PFVF_STATUS_FAILURE; | |
1859 | } else { | |
1860 | vf->vport_instance++; | |
08feecd7 YM |
1861 | |
1862 | /* Force configuration if needed on the newly opened vport */ | |
1863 | qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); | |
6ddc7608 YM |
1864 | |
1865 | __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); | |
dacd88d6 YM |
1866 | } |
1867 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, | |
1868 | sizeof(struct pfvf_def_resp_tlv), status); | |
1869 | } | |
1870 | ||
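| /* VPORT_TEARDOWN mailbox handler: marks the VF malicious if it still has | 
| * active Rx/Tx queues; otherwise stops the vport and forgets the VF's | 
| * configured features and shadow configuration. | 
| */ | 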
1871 | static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, | |
1872 | struct qed_ptt *p_ptt, | |
1873 | struct qed_vf_info *vf) | |
1874 | { | |
1875 | u8 status = PFVF_STATUS_SUCCESS; | |
1876 | int rc; | |
1877 | ||
1878 | vf->vport_instance--; | |
6ddc7608 | 1879 | vf->spoof_chk = false; |
dacd88d6 | 1880 | |
f109c240 MY |
1881 | if ((qed_iov_validate_active_rxq(p_hwfn, vf)) || |
1882 | (qed_iov_validate_active_txq(p_hwfn, vf))) { | |
1883 | vf->b_malicious = true; | |
1884 | DP_NOTICE(p_hwfn, | |
1885 | "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", | 
1886 | vf->abs_vf_id); | |
1887 | status = PFVF_STATUS_MALICIOUS; | |
1888 | goto out; | |
1889 | } | |
1890 | ||
dacd88d6 | 1891 | rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); |
1a635e48 | 1892 | if (rc) { |
dacd88d6 YM |
1893 | DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", |
1894 | rc); | |
1895 | status = PFVF_STATUS_FAILURE; | |
1896 | } | |
1897 | ||
08feecd7 YM |
1898 | /* Forget the configuration on the vport */ |
1899 | vf->configured_features = 0; | |
1900 | memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); | |
1901 | ||
f109c240 | 1902 | out: |
dacd88d6 YM |
1903 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, |
1904 | sizeof(struct pfvf_def_resp_tlv), status); | |
1905 | } | |
1906 | ||
dacd88d6 YM |
1907 | static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, |
1908 | struct qed_ptt *p_ptt, | |
a044df83 YM |
1909 | struct qed_vf_info *vf, |
1910 | u8 status, bool b_legacy) | |
dacd88d6 YM |
1911 | { |
1912 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1913 | struct pfvf_start_queue_resp_tlv *p_tlv; | |
1914 | struct vfpf_start_rxq_tlv *req; | |
a044df83 | 1915 | u16 length; |
dacd88d6 YM |
1916 | |
1917 | mbx->offset = (u8 *)mbx->reply_virt; | |
1918 | ||
a044df83 YM |
1919 | /* Taking a bigger struct instead of adding a TLV to the list was a | 
1920 | * mistake, but one which we're now stuck with, as some older | |
1921 | * clients assume the size of the previous response. | |
1922 | */ | |
1923 | if (!b_legacy) | |
1924 | length = sizeof(*p_tlv); | |
1925 | else | |
1926 | length = sizeof(struct pfvf_def_resp_tlv); | |
1927 | ||
dacd88d6 | 1928 | p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, |
a044df83 | 1929 | length); |
dacd88d6 YM |
1930 | qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, |
1931 | sizeof(struct channel_list_end_tlv)); | |
1932 | ||
1933 | /* Update the TLV with the response */ | |
a044df83 | 1934 | if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { |
dacd88d6 | 1935 | req = &mbx->req_virt->start_rxq; |
351a4ded YM |
1936 | p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + |
1937 | offsetof(struct mstorm_vf_zone, | |
1938 | non_trigger.eth_rx_queue_producers) + | |
1939 | sizeof(struct eth_rx_prod_data) * req->rx_qid; | |
dacd88d6 YM |
1940 | } |
1941 | ||
a044df83 | 1942 | qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); |
dacd88d6 YM |
1943 | } |
1944 | ||
bbe3f233 MY |
1945 | static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, |
1946 | struct qed_vf_info *p_vf, bool b_is_tx) | |
1947 | { | |
1948 | if (b_is_tx) | |
1949 | return QED_IOV_LEGACY_QID_TX; | |
1950 | else | |
1951 | return QED_IOV_LEGACY_QID_RX; | |
1952 | } | |
1953 | ||
dacd88d6 YM |
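| /* START_RXQ mailbox handler: validates the requested queue and status | 
| * block, allocates a queue-cid, clears the Rx producer for non-legacy | 
| * VFs and sends the Rx-queue start ramrod on the VF's behalf. | 
| */ | 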
1954 | static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, |
1955 | struct qed_ptt *p_ptt, | |
1956 | struct qed_vf_info *vf) | |
1957 | { | |
1958 | struct qed_queue_start_common_params params; | |
3946497a | 1959 | struct qed_queue_cid_vf_params vf_params; |
dacd88d6 | 1960 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
41086467 | 1961 | u8 status = PFVF_STATUS_NO_RESOURCE; |
3da7a37a | 1962 | struct qed_vf_q_info *p_queue; |
dacd88d6 | 1963 | struct vfpf_start_rxq_tlv *req; |
f604b17d | 1964 | struct qed_sb_info sb_dummy; |
a044df83 | 1965 | bool b_legacy_vf = false; |
bbe3f233 | 1966 | u8 qid_usage_idx; |
dacd88d6 YM |
1967 | int rc; |
1968 | ||
dacd88d6 | 1969 | req = &mbx->req_virt->start_rxq; |
41086467 | 1970 | |
f109c240 MY |
1971 | if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid, |
1972 | QED_IOV_VALIDATE_Q_DISABLE) || | |
41086467 YM |
1973 | !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) |
1974 | goto out; | |
1975 | ||
bbe3f233 | 1976 | qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); |
3da7a37a MY |
1977 | p_queue = &vf->vf_queues[req->rx_qid]; |
1978 | ||
3946497a MY |
1979 | if (vf->acquire.vfdev_info.eth_fp_hsi_minor == | 
1980 | ETH_HSI_VER_NO_PKT_LEN_TUNN) | 
3946497a | 1981 | b_legacy_vf = true; | 
bbe3f233 | 1982 | /* Acquire a new queue-cid */ |
3da7a37a MY |
1983 | memset(¶ms, 0, sizeof(params)); |
1984 | params.queue_id = p_queue->fw_rx_qid; | |
dacd88d6 | 1985 | params.vport_id = vf->vport_id; |
3da7a37a | 1986 | params.stats_id = vf->abs_vf_id + 0x10; |
f604b17d MY |
1987 | /* Since IGU index is passed via sb_info, construct a dummy one */ |
1988 | memset(&sb_dummy, 0, sizeof(sb_dummy)); | |
1989 | sb_dummy.igu_sb_id = req->hw_sb; | |
1990 | params.p_sb = &sb_dummy; | |
dacd88d6 YM |
1991 | params.sb_idx = req->sb_index; |
1992 | ||
3946497a MY |
1993 | memset(&vf_params, 0, sizeof(vf_params)); |
1994 | vf_params.vfid = vf->relative_vf_id; | |
1995 | vf_params.vf_qid = (u8)req->rx_qid; | |
1996 | vf_params.vf_legacy = b_legacy_vf; | |
bbe3f233 | 1997 | vf_params.qid_usage_idx = qid_usage_idx; |
3946497a MY |
1998 | p_queue->p_rx_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, |
1999 | ¶ms, &vf_params); | |
3da7a37a MY |
2000 | if (!p_queue->p_rx_cid) |
2001 | goto out; | |
2002 | ||
a044df83 YM |
2003 | /* Legacy VFs have their Producers in a different location, which they |
2004 | * calculate on their own and clean the producer prior to this. | |
2005 | */ | |
3946497a | 2006 | if (!b_legacy_vf) |
a044df83 YM |
2007 | REG_WR(p_hwfn, |
2008 | GTT_BAR0_MAP_REG_MSDM_RAM + | |
2009 | MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), | |
2010 | 0); | |
a044df83 | 2011 | |
3da7a37a MY |
2012 | rc = qed_eth_rxq_start_ramrod(p_hwfn, |
2013 | p_queue->p_rx_cid, | |
2014 | req->bd_max_bytes, | |
2015 | req->rxq_addr, | |
2016 | req->cqe_pbl_addr, req->cqe_pbl_size); | |
dacd88d6 YM |
2017 | if (rc) { |
2018 | status = PFVF_STATUS_FAILURE; | |
3da7a37a MY |
2019 | qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid); |
2020 | p_queue->p_rx_cid = NULL; | |
dacd88d6 | 2021 | } else { |
41086467 | 2022 | status = PFVF_STATUS_SUCCESS; |
dacd88d6 YM |
2023 | vf->num_active_rxqs++; |
2024 | } | |
2025 | ||
41086467 | 2026 | out: |
a044df83 | 2027 | qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf); |
dacd88d6 YM |
2028 | } |
2029 | ||
eaf3c0c6 CM |
2030 | static void |
2031 | qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, | |
2032 | struct qed_tunnel_info *p_tun, | |
2033 | u16 tunn_feature_mask) | |
2034 | { | |
2035 | p_resp->tunn_feature_mask = tunn_feature_mask; | |
2036 | p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; | |
2037 | p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; | |
2038 | p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; | |
2039 | p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; | |
2040 | p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled; | |
2041 | p_resp->vxlan_clss = p_tun->vxlan.tun_cls; | |
2042 | p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; | |
2043 | p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; | |
2044 | p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; | |
2045 | p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; | |
2046 | p_resp->geneve_udp_port = p_tun->geneve_port.port; | |
2047 | p_resp->vxlan_udp_port = p_tun->vxlan_port.port; | |
2048 | } | |
2049 | ||
2050 | static void | |
2051 | __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, | |
2052 | struct qed_tunn_update_type *p_tun, | |
2053 | enum qed_tunn_mode mask, u8 tun_cls) | |
2054 | { | |
2055 | if (p_req->tun_mode_update_mask & BIT(mask)) { | |
2056 | p_tun->b_update_mode = true; | |
2057 | ||
2058 | if (p_req->tunn_mode & BIT(mask)) | |
2059 | p_tun->b_mode_enabled = true; | |
2060 | } | |
2061 | ||
2062 | p_tun->tun_cls = tun_cls; | |
2063 | } | |
2064 | ||
2065 | static void | |
2066 | qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, | |
2067 | struct qed_tunn_update_type *p_tun, | |
2068 | struct qed_tunn_update_udp_port *p_port, | |
2069 | enum qed_tunn_mode mask, | |
2070 | u8 tun_cls, u8 update_port, u16 port) | |
2071 | { | |
2072 | if (update_port) { | |
2073 | p_port->b_update_port = true; | |
2074 | p_port->port = port; | |
2075 | } | |
2076 | ||
2077 | __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); | |
2078 | } | |
2079 | ||
2080 | static bool | |
2081 | qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) | |
2082 | { | |
2083 | bool b_update_requested = false; | |
2084 | ||
2085 | if (p_req->tun_mode_update_mask || p_req->update_tun_cls || | |
2086 | p_req->update_geneve_port || p_req->update_vxlan_port) | |
2087 | b_update_requested = true; | |
2088 | ||
2089 | return b_update_requested; | |
2090 | } | |
2091 | ||
2092 | static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) | |
2093 | { | |
2094 | if (tun->b_update_mode && !tun->b_mode_enabled) { | |
2095 | tun->b_update_mode = false; | |
2096 | *rc = -EINVAL; | |
2097 | } | |
2098 | } | |
2099 | ||
2100 | static int | |
2101 | qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, | |
2102 | u16 *tun_features, bool *update, | |
2103 | struct qed_tunnel_info *tun_src) | |
2104 | { | |
2105 | struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; | |
2106 | struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; | |
2107 | u16 bultn_vxlan_port, bultn_geneve_port; | |
2108 | void *cookie = p_hwfn->cdev->ops_cookie; | |
2109 | int i, rc = 0; | |
2110 | ||
2111 | *tun_features = p_hwfn->cdev->tunn_feature_mask; | |
2112 | bultn_vxlan_port = tun->vxlan_port.port; | |
2113 | bultn_geneve_port = tun->geneve_port.port; | |
2114 | qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); | |
2115 | qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); | |
2116 | qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); | |
2117 | qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); | |
2118 | qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); | |
2119 | ||
2120 | if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && | |
2121 | (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || | |
2122 | tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || | |
2123 | tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || | |
2124 | tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || | |
2125 | tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { | |
2126 | tun_src->b_update_rx_cls = false; | |
2127 | tun_src->b_update_tx_cls = false; | |
2128 | rc = -EINVAL; | |
2129 | } | |
2130 | ||
2131 | if (tun_src->vxlan_port.b_update_port) { | |
2132 | if (tun_src->vxlan_port.port == tun->vxlan_port.port) { | |
2133 | tun_src->vxlan_port.b_update_port = false; | |
2134 | } else { | |
2135 | *update = true; | |
2136 | bultn_vxlan_port = tun_src->vxlan_port.port; | |
2137 | } | |
2138 | } | |
2139 | ||
2140 | if (tun_src->geneve_port.b_update_port) { | |
2141 | if (tun_src->geneve_port.port == tun->geneve_port.port) { | |
2142 | tun_src->geneve_port.b_update_port = false; | |
2143 | } else { | |
2144 | *update = true; | |
2145 | bultn_geneve_port = tun_src->geneve_port.port; | |
2146 | } | |
2147 | } | |
2148 | ||
2149 | qed_for_each_vf(p_hwfn, i) { | |
2150 | qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, | |
2151 | bultn_geneve_port); | |
2152 | } | |
2153 | ||
2154 | qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); | |
2155 | ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); | |
2156 | ||
2157 | return rc; | |
2158 | } | |
2159 | ||
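| /* UPDATE_TUNN_PARAM mailbox handler: translates the VF's tunnel request, | 
| * lets the PF validate and trim it, updates the tunnel configuration and | 
| * UDP ports when needed, and reports the resulting PF tunnel state back. | 
| */ | 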
2160 | static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, | |
2161 | struct qed_ptt *p_ptt, | |
2162 | struct qed_vf_info *p_vf) | |
2163 | { | |
2164 | struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; | |
2165 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; | |
2166 | struct pfvf_update_tunn_param_tlv *p_resp; | |
2167 | struct vfpf_update_tunn_param_tlv *p_req; | |
2168 | u8 status = PFVF_STATUS_SUCCESS; | |
2169 | bool b_update_required = false; | |
2170 | struct qed_tunnel_info tunn; | |
2171 | u16 tunn_feature_mask = 0; | |
2172 | int i, rc = 0; | |
2173 | ||
2174 | mbx->offset = (u8 *)mbx->reply_virt; | |
2175 | ||
2176 | memset(&tunn, 0, sizeof(tunn)); | |
2177 | p_req = &mbx->req_virt->tunn_param_update; | |
2178 | ||
2179 | if (!qed_iov_pf_validate_tunn_param(p_req)) { | |
2180 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2181 | "No tunnel update requested by VF\n"); | |
2182 | status = PFVF_STATUS_FAILURE; | |
2183 | goto send_resp; | |
2184 | } | |
2185 | ||
2186 | tunn.b_update_rx_cls = p_req->update_tun_cls; | |
2187 | tunn.b_update_tx_cls = p_req->update_tun_cls; | |
2188 | ||
2189 | qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, | |
2190 | QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, | |
2191 | p_req->update_vxlan_port, | |
2192 | p_req->vxlan_port); | |
2193 | qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, | |
2194 | QED_MODE_L2GENEVE_TUNN, | |
2195 | p_req->l2geneve_clss, | |
2196 | p_req->update_geneve_port, | |
2197 | p_req->geneve_port); | |
2198 | __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, | |
2199 | QED_MODE_IPGENEVE_TUNN, | |
2200 | p_req->ipgeneve_clss); | |
2201 | __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, | |
2202 | QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); | |
2203 | __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, | |
2204 | QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); | |
2205 | ||
2206 | /* If the PF modifies the VF's request, it should | 
2207 | * still return an error for a partial or modified | 
2208 | * configuration, as opposed to the requested one. | 
2209 | */ | |
2210 | rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask, | |
2211 | &b_update_required, &tunn); | |
2212 | ||
2213 | if (rc) | |
2214 | status = PFVF_STATUS_FAILURE; | |
2215 | ||
2216 | /* Check whether the QED client wants to update anything */ | 
2217 | if (b_update_required) { | |
2218 | u16 geneve_port; | |
2219 | ||
4f64675f | 2220 | rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, |
eaf3c0c6 CM |
2221 | QED_SPQ_MODE_EBLOCK, NULL); |
2222 | if (rc) | |
2223 | status = PFVF_STATUS_FAILURE; | |
2224 | ||
2225 | geneve_port = p_tun->geneve_port.port; | |
2226 | qed_for_each_vf(p_hwfn, i) { | |
2227 | qed_iov_bulletin_set_udp_ports(p_hwfn, i, | |
2228 | p_tun->vxlan_port.port, | |
2229 | geneve_port); | |
2230 | } | |
2231 | } | |
2232 | ||
2233 | send_resp: | |
2234 | p_resp = qed_add_tlv(p_hwfn, &mbx->offset, | |
2235 | CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); | |
2236 | ||
2237 | qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); | |
2238 | qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, | |
2239 | sizeof(struct channel_list_end_tlv)); | |
2240 | ||
2241 | qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); | |
2242 | } | |
2243 | ||
5040acf5 YM |
2244 | static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, |
2245 | struct qed_ptt *p_ptt, | |
2246 | struct qed_vf_info *p_vf, u8 status) | |
2247 | { | |
2248 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; | |
2249 | struct pfvf_start_queue_resp_tlv *p_tlv; | |
a044df83 YM |
2250 | bool b_legacy = false; |
2251 | u16 length; | |
5040acf5 YM |
2252 | |
2253 | mbx->offset = (u8 *)mbx->reply_virt; | |
2254 | ||
a044df83 YM |
2255 | /* Taking a bigger struct instead of adding a TLV to the list was a | 
2256 | * mistake, but one which we're now stuck with, as some older | |
2257 | * clients assume the size of the previous response. | |
2258 | */ | |
2259 | if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == | |
2260 | ETH_HSI_VER_NO_PKT_LEN_TUNN) | |
2261 | b_legacy = true; | |
2262 | ||
2263 | if (!b_legacy) | |
2264 | length = sizeof(*p_tlv); | |
2265 | else | |
2266 | length = sizeof(struct pfvf_def_resp_tlv); | |
2267 | ||
5040acf5 | 2268 | p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, |
a044df83 | 2269 | length); |
5040acf5 YM |
2270 | qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, |
2271 | sizeof(struct channel_list_end_tlv)); | |
2272 | ||
2273 | /* Update the TLV with the response */ | |
a044df83 | 2274 | if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { |
5040acf5 YM |
2275 | u16 qid = mbx->req_virt->start_txq.tx_qid; |
2276 | ||
51ff1725 RA |
2277 | p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid, |
2278 | DQ_DEMS_LEGACY); | |
5040acf5 YM |
2279 | } |
2280 | ||
a044df83 | 2281 | qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); |
5040acf5 YM |
2282 | } |
2283 | ||
dacd88d6 YM |
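| /* START_TXQ mailbox handler: validates the requested queue and status | 
| * block, allocates a queue-cid and issues the Tx-queue start ramrod on | 
| * the physical queue assigned to this VF. | 
| */ | 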
2284 | static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, |
2285 | struct qed_ptt *p_ptt, | |
2286 | struct qed_vf_info *vf) | |
2287 | { | |
dacd88d6 | 2288 | struct qed_queue_start_common_params params; |
3946497a | 2289 | struct qed_queue_cid_vf_params vf_params; |
dacd88d6 | 2290 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
41086467 | 2291 | u8 status = PFVF_STATUS_NO_RESOURCE; |
dacd88d6 | 2292 | struct vfpf_start_txq_tlv *req; |
3da7a37a | 2293 | struct qed_vf_q_info *p_queue; |
f604b17d | 2294 | struct qed_sb_info sb_dummy; |
3946497a | 2295 | bool b_vf_legacy = false; |
bbe3f233 | 2296 | u8 qid_usage_idx; |
dacd88d6 | 2297 | int rc; |
3da7a37a | 2298 | u16 pq; |
dacd88d6 | 2299 | |
dacd88d6 YM |
2300 | memset(¶ms, 0, sizeof(params)); |
2301 | req = &mbx->req_virt->start_txq; | |
41086467 | 2302 | |
f109c240 MY |
2303 | if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, |
2304 | QED_IOV_VALIDATE_Q_DISABLE) || | |
41086467 YM |
2305 | !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) |
2306 | goto out; | |
2307 | ||
bbe3f233 | 2308 | qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); |
3da7a37a MY |
2309 | p_queue = &vf->vf_queues[req->tx_qid]; |
2310 | ||
3946497a MY |
2311 | if (vf->acquire.vfdev_info.eth_fp_hsi_minor == |
2312 | ETH_HSI_VER_NO_PKT_LEN_TUNN) | |
2313 | b_vf_legacy = true; | |
2314 | ||
bbe3f233 | 2315 | /* Acquire a new queue-cid */ |
3da7a37a | 2316 | params.queue_id = p_queue->fw_tx_qid; |
dacd88d6 | 2317 | params.vport_id = vf->vport_id; |
3da7a37a | 2318 | params.stats_id = vf->abs_vf_id + 0x10; |
f604b17d MY |
2319 | |
2320 | /* Since IGU index is passed via sb_info, construct a dummy one */ | |
2321 | memset(&sb_dummy, 0, sizeof(sb_dummy)); | |
2322 | sb_dummy.igu_sb_id = req->hw_sb; | |
2323 | params.p_sb = &sb_dummy; | |
dacd88d6 YM |
2324 | params.sb_idx = req->sb_index; |
2325 | ||
3946497a MY |
2326 | memset(&vf_params, 0, sizeof(vf_params)); |
2327 | vf_params.vfid = vf->relative_vf_id; | |
2328 | vf_params.vf_qid = (u8)req->tx_qid; | |
2329 | vf_params.vf_legacy = b_vf_legacy; | |
bbe3f233 | 2330 | vf_params.qid_usage_idx = qid_usage_idx; |
3946497a MY |
2331 | |
2332 | p_queue->p_tx_cid = qed_eth_queue_to_cid(p_hwfn, | |
2333 | vf->opaque_fid, | |
2334 | ¶ms, &vf_params); | |
3da7a37a MY |
2335 | if (!p_queue->p_tx_cid) |
2336 | goto out; | |
dacd88d6 | 2337 | |
b5a9ee7c | 2338 | pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); |
3da7a37a MY |
2339 | rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid, |
2340 | req->pbl_addr, req->pbl_size, pq); | |
41086467 | 2341 | if (rc) { |
dacd88d6 | 2342 | status = PFVF_STATUS_FAILURE; |
3da7a37a MY |
2343 | qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid); |
2344 | p_queue->p_tx_cid = NULL; | |
41086467 YM |
2345 | } else { |
2346 | status = PFVF_STATUS_SUCCESS; | |
41086467 | 2347 | } |
dacd88d6 | 2348 | |
41086467 | 2349 | out: |
5040acf5 | 2350 | qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status); |
dacd88d6 YM |
2351 | } |
2352 | ||
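| /* Stop a single VF Rx queue after checking it is currently active; the | 
| * queue-cid is released and the VF's active Rx queue count is decremented. | 
| */ | 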
2353 | static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, | |
2354 | struct qed_vf_info *vf, | |
4c4fa793 | 2355 | u16 rxq_id, bool cqe_completion) |
dacd88d6 | 2356 | { |
3da7a37a | 2357 | struct qed_vf_q_info *p_queue; |
dacd88d6 | 2358 | int rc = 0; |
dacd88d6 | 2359 | |
4c4fa793 MY |
2360 | if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, |
2361 | QED_IOV_VALIDATE_Q_ENABLE)) { | |
2362 | DP_VERBOSE(p_hwfn, | |
2363 | QED_MSG_IOV, | |
2364 | "VF[%d] Tried Closing Rx 0x%04x which is inactive\n", | |
2365 | vf->relative_vf_id, rxq_id); | |
dacd88d6 | 2366 | return -EINVAL; |
4c4fa793 | 2367 | } |
dacd88d6 | 2368 | |
4c4fa793 | 2369 | p_queue = &vf->vf_queues[rxq_id]; |
3da7a37a | 2370 | |
4c4fa793 MY |
2371 | rc = qed_eth_rx_queue_stop(p_hwfn, |
2372 | p_queue->p_rx_cid, | |
2373 | false, cqe_completion); | |
2374 | if (rc) | |
2375 | return rc; | |
3da7a37a | 2376 | |
4c4fa793 MY |
2377 | p_queue->p_rx_cid = NULL; |
2378 | vf->num_active_rxqs--; | |
dacd88d6 | 2379 | |
4c4fa793 | 2380 | return 0; |
dacd88d6 YM |
2381 | } |
2382 | ||
2383 | static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, | |
4c4fa793 | 2384 | struct qed_vf_info *vf, u16 txq_id) |
dacd88d6 | 2385 | { |
3da7a37a | 2386 | struct qed_vf_q_info *p_queue; |
4c4fa793 | 2387 | int rc = 0; |
dacd88d6 | 2388 | |
4c4fa793 MY |
2389 | if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, |
2390 | QED_IOV_VALIDATE_Q_ENABLE)) | |
dacd88d6 YM |
2391 | return -EINVAL; |
2392 | ||
4c4fa793 | 2393 | p_queue = &vf->vf_queues[txq_id]; |
dacd88d6 | 2394 | |
4c4fa793 MY |
2395 | rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid); |
2396 | if (rc) | |
2397 | return rc; | |
3da7a37a | 2398 | |
4c4fa793 | 2399 | p_queue->p_tx_cid = NULL; |
3da7a37a | 2400 | |
4c4fa793 | 2401 | return 0; |
dacd88d6 YM |
2402 | } |
2403 | ||
2404 | static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, | |
2405 | struct qed_ptt *p_ptt, | |
2406 | struct qed_vf_info *vf) | |
2407 | { | |
2408 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
2409 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
4c4fa793 | 2410 | u8 status = PFVF_STATUS_FAILURE; |
dacd88d6 YM |
2411 | struct vfpf_stop_rxqs_tlv *req; |
2412 | int rc; | |
2413 | ||
4c4fa793 MY |
2414 | /* There has never been an official driver that used this interface |
2415 | * for stopping multiple queues, and it is now considered deprecated. | |
2416 | * Validate this isn't used here. | |
dacd88d6 YM |
2417 | */ |
2418 | req = &mbx->req_virt->stop_rxqs; | |
4c4fa793 MY |
2419 | if (req->num_rxqs != 1) { |
2420 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2421 | "Odd; VF[%d] tried stopping multiple Rx queues\n", | |
2422 | vf->relative_vf_id); | |
2423 | status = PFVF_STATUS_NOT_SUPPORTED; | |
2424 | goto out; | |
2425 | } | |
dacd88d6 | 2426 | |
4c4fa793 MY |
2427 | rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, |
2428 | req->cqe_completion); | |
2429 | if (!rc) | |
2430 | status = PFVF_STATUS_SUCCESS; | |
2431 | out: | |
dacd88d6 YM |
2432 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, |
2433 | length, status); | |
2434 | } | |
2435 | ||
2436 | static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, | |
2437 | struct qed_ptt *p_ptt, | |
2438 | struct qed_vf_info *vf) | |
2439 | { | |
2440 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
2441 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
4c4fa793 | 2442 | u8 status = PFVF_STATUS_FAILURE; |
dacd88d6 YM |
2443 | struct vfpf_stop_txqs_tlv *req; |
2444 | int rc; | |
2445 | ||
4c4fa793 MY |
2446 | /* There has never been an official driver that used this interface |
2447 | * for stopping multiple queues, and it is now considered deprecated. | |
2448 | * Validate this isn't used here. | |
dacd88d6 YM |
2449 | */ |
2450 | req = &mbx->req_virt->stop_txqs; | |
4c4fa793 MY |
2451 | if (req->num_txqs != 1) { |
2452 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2453 | "Odd; VF[%d] tried stopping multiple Tx queues\n", | |
2454 | vf->relative_vf_id); | |
2455 | status = PFVF_STATUS_NOT_SUPPORTED; | |
2456 | goto out; | |
2457 | } | |
2458 | rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid); | |
2459 | if (!rc) | |
2460 | status = PFVF_STATUS_SUCCESS; | |
dacd88d6 | 2461 | |
4c4fa793 | 2462 | out: |
dacd88d6 YM |
2463 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, |
2464 | length, status); | |
2465 | } | |
2466 | ||
17b235c1 YM |
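| /* UPDATE_RXQ mailbox handler: verifies that every queue in the requested | 
| * range is active, collects their queue-cids and sends a single Rx-queues | 
| * update ramrod with the requested completion flags. | 
| */ | 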
2467 | static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, |
2468 | struct qed_ptt *p_ptt, | |
2469 | struct qed_vf_info *vf) | |
2470 | { | |
3da7a37a | 2471 | struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; |
17b235c1 YM |
2472 | u16 length = sizeof(struct pfvf_def_resp_tlv); |
2473 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
2474 | struct vfpf_update_rxq_tlv *req; | |
3da7a37a | 2475 | u8 status = PFVF_STATUS_FAILURE; |
17b235c1 YM |
2476 | u8 complete_event_flg; |
2477 | u8 complete_cqe_flg; | |
2478 | u16 qid; | |
2479 | int rc; | |
2480 | u8 i; | |
2481 | ||
2482 | req = &mbx->req_virt->update_rxq; | |
2483 | complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); | |
2484 | complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); | |
2485 | ||
3da7a37a | 2486 | /* Validate inputs */ |
f109c240 MY |
2487 | for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) |
2488 | if (!qed_iov_validate_rxq(p_hwfn, vf, i, | |
2489 | QED_IOV_VALIDATE_Q_ENABLE)) { | |
2490 | DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", | |
2491 | vf->relative_vf_id, req->rx_qid, req->num_rxqs); | |
3da7a37a | 2492 | goto out; |
17b235c1 YM |
2493 | } |
2494 | ||
f109c240 MY |
2495 | /* Prepare the handlers */ |
2496 | for (i = 0; i < req->num_rxqs; i++) { | |
2497 | qid = req->rx_qid + i; | |
3da7a37a | 2498 | handlers[i] = vf->vf_queues[qid].p_rx_cid; |
17b235c1 YM |
2499 | } |
2500 | ||
3da7a37a MY |
2501 | rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, |
2502 | req->num_rxqs, | |
2503 | complete_cqe_flg, | |
2504 | complete_event_flg, | |
2505 | QED_SPQ_MODE_EBLOCK, NULL); | |
2506 | if (rc) | |
2507 | goto out; | |
2508 | ||
2509 | status = PFVF_STATUS_SUCCESS; | |
2510 | out: | |
17b235c1 YM |
2511 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, |
2512 | length, status); | |
2513 | } | |
2514 | ||
dacd88d6 YM |
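| /* Walk a VF's TLV list looking for an extended TLV of the requested type; | 
| * returns NULL on a zero-length TLV, on a buffer overrun, or if the list | 
| * ends without a match. | 
| */ | 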
2515 | void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, |
2516 | void *p_tlvs_list, u16 req_type) | |
2517 | { | |
2518 | struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; | |
2519 | int len = 0; | |
2520 | ||
2521 | do { | |
2522 | if (!p_tlv->length) { | |
2523 | DP_NOTICE(p_hwfn, "Zero length TLV found\n"); | |
2524 | return NULL; | |
2525 | } | |
2526 | ||
2527 | if (p_tlv->type == req_type) { | |
2528 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2529 | "Extended tlv type %d, length %d found\n", | |
2530 | p_tlv->type, p_tlv->length); | |
2531 | return p_tlv; | |
2532 | } | |
2533 | ||
2534 | len += p_tlv->length; | |
2535 | p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); | |
2536 | ||
2537 | if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { | |
2538 | DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n"); | 
2539 | return NULL; | |
2540 | } | |
2541 | } while (p_tlv->type != CHANNEL_TLV_LIST_END); | |
2542 | ||
2543 | return NULL; | |
2544 | } | |
2545 | ||
2546 | static void | |
2547 | qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, | |
2548 | struct qed_sp_vport_update_params *p_data, | |
2549 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
2550 | { | |
2551 | struct vfpf_vport_update_activate_tlv *p_act_tlv; | |
2552 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; | |
2553 | ||
2554 | p_act_tlv = (struct vfpf_vport_update_activate_tlv *) | |
2555 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
2556 | if (!p_act_tlv) | |
2557 | return; | |
2558 | ||
2559 | p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; | |
2560 | p_data->vport_active_rx_flg = p_act_tlv->active_rx; | |
2561 | p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; | |
2562 | p_data->vport_active_tx_flg = p_act_tlv->active_tx; | |
2563 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; | |
2564 | } | |
2565 | ||
17b235c1 YM |
2566 | static void |
2567 | qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, | |
2568 | struct qed_sp_vport_update_params *p_data, | |
2569 | struct qed_vf_info *p_vf, | |
2570 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
2571 | { | |
2572 | struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; | |
2573 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; | |
2574 | ||
2575 | p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) | |
2576 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
2577 | if (!p_vlan_tlv) | |
2578 | return; | |
2579 | ||
08feecd7 YM |
2580 | p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; |
2581 | ||
2582 | /* Ignore the VF request if we're forcing a vlan */ | |
1a635e48 | 2583 | if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { |
08feecd7 YM |
2584 | p_data->update_inner_vlan_removal_flg = 1; |
2585 | p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; | |
2586 | } | |
17b235c1 YM |
2587 | |
2588 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; | |
2589 | } | |
2590 | ||
2591 | static void | |
2592 | qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, | |
2593 | struct qed_sp_vport_update_params *p_data, | |
2594 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
2595 | { | |
2596 | struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; | |
2597 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; | |
2598 | ||
2599 | p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) | |
2600 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, | |
2601 | tlv); | |
2602 | if (!p_tx_switch_tlv) | |
2603 | return; | |
2604 | ||
2605 | p_data->update_tx_switching_flg = 1; | |
2606 | p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; | |
2607 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; | |
2608 | } | |
2609 | ||
dacd88d6 YM |
2610 | static void |
2611 | qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, | |
2612 | struct qed_sp_vport_update_params *p_data, | |
2613 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
2614 | { | |
2615 | struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; | |
2616 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; | |
2617 | ||
2618 | p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) | |
2619 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
2620 | if (!p_mcast_tlv) | |
2621 | return; | |
2622 | ||
2623 | p_data->update_approx_mcast_flg = 1; | |
2624 | memcpy(p_data->bins, p_mcast_tlv->bins, | |
2625 | sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); | |
2626 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; | |
2627 | } | |
2628 | ||
2629 | static void | |
2630 | qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, | |
2631 | struct qed_sp_vport_update_params *p_data, | |
2632 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
2633 | { | |
2634 | struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; | |
2635 | struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; | |
2636 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; | |
2637 | ||
2638 | p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) | |
2639 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
2640 | if (!p_accept_tlv) | |
2641 | return; | |
2642 | ||
2643 | p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; | |
2644 | p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; | |
2645 | p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; | |
2646 | p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; | |
2647 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; | |
2648 | } | |
2649 | ||
17b235c1 YM |
2650 | static void |
2651 | qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, | |
2652 | struct qed_sp_vport_update_params *p_data, | |
2653 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
2654 | { | |
2655 | struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; | |
2656 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; | |
2657 | ||
2658 | p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) | |
2659 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, | |
2660 | tlv); | |
2661 | if (!p_accept_any_vlan) | |
2662 | return; | |
2663 | ||
2664 | p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; | |
2665 | p_data->update_accept_any_vlan_flg = | |
2666 | p_accept_any_vlan->update_accept_any_vlan_flg; | |
2667 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; | |
2668 | } | |
2669 | ||
dacd88d6 YM |
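| /* Parse the RSS extended TLV: copy the update flags, capabilities and key, | 
| * and translate the VF-relative indirection table into Rx queue-cids, | 
| * rejecting the TLV if any referenced queue is not active. | 
| */ | 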
2670 | static void |
2671 | qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, | |
2672 | struct qed_vf_info *vf, | |
2673 | struct qed_sp_vport_update_params *p_data, | |
2674 | struct qed_rss_params *p_rss, | |
f29ffdb6 MY |
2675 | struct qed_iov_vf_mbx *p_mbx, |
2676 | u16 *tlvs_mask, u16 *tlvs_accepted) | |
dacd88d6 YM |
2677 | { |
2678 | struct vfpf_vport_update_rss_tlv *p_rss_tlv; | |
2679 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; | |
f29ffdb6 | 2680 | bool b_reject = false; |
dacd88d6 | 2681 | u16 table_size; |
f29ffdb6 | 2682 | u16 i, q_idx; |
dacd88d6 YM |
2683 | |
2684 | p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) | |
2685 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
2686 | if (!p_rss_tlv) { | |
2687 | p_data->rss_params = NULL; | |
2688 | return; | |
2689 | } | |
2690 | ||
2691 | memset(p_rss, 0, sizeof(struct qed_rss_params)); | |
2692 | ||
2693 | p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & | |
2694 | VFPF_UPDATE_RSS_CONFIG_FLAG); | |
2695 | p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & | |
2696 | VFPF_UPDATE_RSS_CAPS_FLAG); | |
2697 | p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & | |
2698 | VFPF_UPDATE_RSS_IND_TABLE_FLAG); | |
2699 | p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & | |
2700 | VFPF_UPDATE_RSS_KEY_FLAG); | |
2701 | ||
2702 | p_rss->rss_enable = p_rss_tlv->rss_enable; | |
2703 | p_rss->rss_eng_id = vf->relative_vf_id + 1; | |
2704 | p_rss->rss_caps = p_rss_tlv->rss_caps; | |
2705 | p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; | |
dacd88d6 YM |
2706 | memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); |
2707 | ||
2708 | table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), | |
2709 | (1 << p_rss_tlv->rss_table_size_log)); | |
2710 | ||
dacd88d6 | 2711 | for (i = 0; i < table_size; i++) { |
f29ffdb6 | 2712 | q_idx = p_rss_tlv->rss_ind_table[i]; |
f109c240 MY |
2713 | if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx, |
2714 | QED_IOV_VALIDATE_Q_ENABLE)) { | |
f29ffdb6 MY |
2715 | DP_VERBOSE(p_hwfn, |
2716 | QED_MSG_IOV, | |
2717 | "VF[%d]: Omitting RSS due to wrong queue %04x\n", | |
2718 | vf->relative_vf_id, q_idx); | |
2719 | b_reject = true; | |
2720 | goto out; | |
2721 | } | |
dacd88d6 | 2722 | |
f29ffdb6 | 2723 | p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid; |
dacd88d6 YM |
2724 | } |
2725 | ||
2726 | p_data->rss_params = p_rss; | |
f29ffdb6 | 2727 | out: |
dacd88d6 | 2728 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; |
f29ffdb6 MY |
2729 | if (!b_reject) |
2730 | *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; | |
dacd88d6 YM |
2731 | } |
2732 | ||
17b235c1 YM |
2733 | static void |
2734 | qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, | |
2735 | struct qed_vf_info *vf, | |
2736 | struct qed_sp_vport_update_params *p_data, | |
2737 | struct qed_sge_tpa_params *p_sge_tpa, | |
2738 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
2739 | { | |
2740 | struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; | |
2741 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; | |
2742 | ||
2743 | p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) | |
2744 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
2745 | ||
2746 | if (!p_sge_tpa_tlv) { | |
2747 | p_data->sge_tpa_params = NULL; | |
2748 | return; | |
2749 | } | |
2750 | ||
2751 | memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); | |
2752 | ||
2753 | p_sge_tpa->update_tpa_en_flg = | |
2754 | !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); | |
2755 | p_sge_tpa->update_tpa_param_flg = | |
2756 | !!(p_sge_tpa_tlv->update_sge_tpa_flags & | |
2757 | VFPF_UPDATE_TPA_PARAM_FLAG); | |
2758 | ||
2759 | p_sge_tpa->tpa_ipv4_en_flg = | |
2760 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); | |
2761 | p_sge_tpa->tpa_ipv6_en_flg = | |
2762 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); | |
2763 | p_sge_tpa->tpa_pkt_split_flg = | |
2764 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); | |
2765 | p_sge_tpa->tpa_hdr_data_split_flg = | |
2766 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); | |
2767 | p_sge_tpa->tpa_gro_consistent_flg = | |
2768 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); | |
2769 | ||
2770 | p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; | |
2771 | p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; | |
2772 | p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; | |
2773 | p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; | |
2774 | p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; | |
2775 | ||
2776 | p_data->sge_tpa_params = p_sge_tpa; | |
2777 | ||
2778 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; | |
2779 | } | |
2780 | ||
f990c82c MY |
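| /* For untrusted VFs, silently strip the 'accept unmatched' unicast and | 
| * multicast flags from a vport-update request while recording the modes | 
| * the VF actually asked for in its public info. | 
| */ | 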
2781 | static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, |
2782 | u8 vfid, | |
2783 | struct qed_sp_vport_update_params *params, | |
2784 | u16 *tlvs) | |
2785 | { | |
2786 | u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; | |
2787 | struct qed_filter_accept_flags *flags = ¶ms->accept_flags; | |
2788 | struct qed_public_vf_info *vf_info; | |
2789 | ||
2790 | /* Untrusted VFs can't even be trusted to know that fact. | |
2791 | * Simply indicate everything is configured fine, and trace | |
2792 | * configuration 'behind their back'. | |
2793 | */ | |
2794 | if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM))) | |
2795 | return 0; | |
2796 | ||
2797 | vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); | |
2798 | ||
2799 | if (flags->update_rx_mode_config) { | |
2800 | vf_info->rx_accept_mode = flags->rx_accept_filter; | |
2801 | if (!vf_info->is_trusted_configured) | |
2802 | flags->rx_accept_filter &= ~mask; | |
2803 | } | |
2804 | ||
2805 | if (flags->update_tx_mode_config) { | |
2806 | vf_info->tx_accept_mode = flags->tx_accept_filter; | |
2807 | if (!vf_info->is_trusted_configured) | |
2808 | flags->tx_accept_filter &= ~mask; | |
2809 | } | |
2810 | ||
2811 | return 0; | |
2812 | } | |
2813 | ||
dacd88d6 YM |
2814 | static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, |
2815 | struct qed_ptt *p_ptt, | |
2816 | struct qed_vf_info *vf) | |
2817 | { | |
f29ffdb6 | 2818 | struct qed_rss_params *p_rss_params = NULL; |
dacd88d6 YM |
2819 | struct qed_sp_vport_update_params params; |
2820 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
17b235c1 | 2821 | struct qed_sge_tpa_params sge_tpa_params; |
f29ffdb6 | 2822 | u16 tlvs_mask = 0, tlvs_accepted = 0; |
dacd88d6 | 2823 | u8 status = PFVF_STATUS_SUCCESS; |
dacd88d6 YM |
2824 | u16 length; |
2825 | int rc; | |
2826 | ||
41086467 YM |
2827 | /* Validate that the VF has an active vport to update */ | 
2828 | if (!vf->vport_instance) { | |
2829 | DP_VERBOSE(p_hwfn, | |
2830 | QED_MSG_IOV, | |
2831 | "No VPORT instance available for VF[%d], failing vport update\n", | |
2832 | vf->abs_vf_id); | |
2833 | status = PFVF_STATUS_FAILURE; | |
2834 | goto out; | |
2835 | } | |
f29ffdb6 MY |
2836 | p_rss_params = vzalloc(sizeof(*p_rss_params)); |
2837 | if (p_rss_params == NULL) { | |
2838 | status = PFVF_STATUS_FAILURE; | |
2839 | goto out; | |
2840 | } | |
41086467 | 2841 | |
dacd88d6 YM |
2842 | memset(¶ms, 0, sizeof(params)); |
2843 | params.opaque_fid = vf->opaque_fid; | |
2844 | params.vport_id = vf->vport_id; | |
2845 | params.rss_params = NULL; | |
2846 | ||
2847 | /* Search for extended tlvs list and update values | |
2848 | * from VF in struct qed_sp_vport_update_params. | |
2849 | */ | |
2850 | qed_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask); | |
17b235c1 YM |
2851 | qed_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask); |
2852 | qed_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask); | |
dacd88d6 YM |
2853 | qed_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask); |
2854 | qed_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask); | |
17b235c1 YM |
2855 | qed_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask); |
2856 | qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms, | |
2857 | &sge_tpa_params, mbx, &tlvs_mask); | |
dacd88d6 | 2858 | |
f29ffdb6 MY |
2859 | tlvs_accepted = tlvs_mask; |
2860 | ||
2861 | /* Some of the extended TLVs need to be validated first; in that case, | 
2862 | * they can update the mask without updating 'accepted' [so that the | 
2863 | * PF can communicate to the VF that it has rejected the request]. | 
dacd88d6 | 2864 | */ |
f29ffdb6 MY |
2865 | qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params, |
2866 | mbx, &tlvs_mask, &tlvs_accepted); | |
2867 | ||
f990c82c MY |
2868 | if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id, |
2869 | ¶ms, &tlvs_accepted)) { | |
2870 | tlvs_accepted = 0; | |
2871 | status = PFVF_STATUS_NOT_SUPPORTED; | |
2872 | goto out; | |
2873 | } | |
2874 | ||
f29ffdb6 MY |
2875 | if (!tlvs_accepted) { |
2876 | if (tlvs_mask) | |
2877 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2878 | "Upper-layer prevents VF vport configuration\n"); | |
2879 | else | |
2880 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2881 | "No feature tlvs found for vport update\n"); | |
dacd88d6 YM |
2882 | status = PFVF_STATUS_NOT_SUPPORTED; |
2883 | goto out; | |
2884 | } | |
2885 | ||
2886 | rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); | |
2887 | ||
2888 | if (rc) | |
2889 | status = PFVF_STATUS_FAILURE; | |
2890 | ||
2891 | out: | |
f29ffdb6 | 2892 | vfree(p_rss_params); |
dacd88d6 | 2893 | length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, |
f29ffdb6 | 2894 | tlvs_mask, tlvs_accepted); |
dacd88d6 YM |
2895 | qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); |
2896 | } | |
2897 | ||
8246d0b4 YM |
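| /* Maintain the VF's shadow VLAN-filter table: handle removes and flushes | 
| * first, then record additions unless a forced VLAN is active, failing if | 
| * the VF exceeds its VLAN filter quota. | 
| */ | 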
2898 | static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, |
2899 | struct qed_vf_info *p_vf, | |
2900 | struct qed_filter_ucast *p_params) | |
08feecd7 YM |
2901 | { |
2902 | int i; | |
2903 | ||
08feecd7 YM |
2904 | /* First remove entries and then add new ones */ |
2905 | if (p_params->opcode == QED_FILTER_REMOVE) { | |
2906 | for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) | |
2907 | if (p_vf->shadow_config.vlans[i].used && | |
2908 | p_vf->shadow_config.vlans[i].vid == | |
2909 | p_params->vlan) { | |
2910 | p_vf->shadow_config.vlans[i].used = false; | |
2911 | break; | |
2912 | } | |
2913 | if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { | |
2914 | DP_VERBOSE(p_hwfn, | |
2915 | QED_MSG_IOV, | |
2916 | "VF [%d] - Tries to remove a non-existing vlan\n", | |
2917 | p_vf->relative_vf_id); | |
2918 | return -EINVAL; | |
2919 | } | |
2920 | } else if (p_params->opcode == QED_FILTER_REPLACE || | |
2921 | p_params->opcode == QED_FILTER_FLUSH) { | |
2922 | for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) | |
2923 | p_vf->shadow_config.vlans[i].used = false; | |
2924 | } | |
2925 | ||
2926 | /* In forced mode, we're willing to remove entries - but we don't add | |
2927 | * new ones. | |
2928 | */ | |
1a635e48 | 2929 | if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) |
08feecd7 YM |
2930 | return 0; |
2931 | ||
2932 | if (p_params->opcode == QED_FILTER_ADD || | |
2933 | p_params->opcode == QED_FILTER_REPLACE) { | |
2934 | for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { | |
2935 | if (p_vf->shadow_config.vlans[i].used) | |
2936 | continue; | |
2937 | ||
2938 | p_vf->shadow_config.vlans[i].used = true; | |
2939 | p_vf->shadow_config.vlans[i].vid = p_params->vlan; | |
2940 | break; | |
2941 | } | |
2942 | ||
2943 | if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { | |
2944 | DP_VERBOSE(p_hwfn, | |
2945 | QED_MSG_IOV, | |
2946 | "VF [%d] - Tries to configure more than %d vlan filters\n", | |
2947 | p_vf->relative_vf_id, | |
2948 | QED_ETH_VF_NUM_VLAN_FILTERS + 1); | |
2949 | return -EINVAL; | |
2950 | } | |
2951 | } | |
2952 | ||
2953 | return 0; | |
2954 | } | |
2955 | ||
8246d0b4 YM |
2956 | static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, |
2957 | struct qed_vf_info *p_vf, | |
2958 | struct qed_filter_ucast *p_params) | |
2959 | { | |
2960 | int i; | |
2961 | ||
2962 | /* If we're in forced-mode, we don't allow any change */ | |
1a635e48 | 2963 | if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) |
8246d0b4 YM |
2964 | return 0; |
2965 | ||
2966 | /* First remove entries and then add new ones */ | |
2967 | if (p_params->opcode == QED_FILTER_REMOVE) { | |
2968 | for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { | |
2969 | if (ether_addr_equal(p_vf->shadow_config.macs[i], | |
2970 | p_params->mac)) { | |
0ee28e31 | 2971 | eth_zero_addr(p_vf->shadow_config.macs[i]); |
8246d0b4 YM |
2972 | break; |
2973 | } | |
2974 | } | |
2975 | ||
2976 | if (i == QED_ETH_VF_NUM_MAC_FILTERS) { | |
2977 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2978 | "MAC isn't configured\n"); | |
2979 | return -EINVAL; | |
2980 | } | |
2981 | } else if (p_params->opcode == QED_FILTER_REPLACE || | |
2982 | p_params->opcode == QED_FILTER_FLUSH) { | |
2983 | for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) | |
0ee28e31 | 2984 | eth_zero_addr(p_vf->shadow_config.macs[i]); |
8246d0b4 YM |
2985 | } |
2986 | ||
2987 | /* List the new MAC address */ | |
2988 | if (p_params->opcode != QED_FILTER_ADD && | |
2989 | p_params->opcode != QED_FILTER_REPLACE) | |
2990 | return 0; | |
2991 | ||
2992 | for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { | |
2993 | if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { | |
2994 | ether_addr_copy(p_vf->shadow_config.macs[i], | |
2995 | p_params->mac); | |
2996 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2997 | "Added MAC at entry %d in shadow\n", i); | 
2998 | break; | |
2999 | } | |
3000 | } | |
3001 | ||
3002 | if (i == QED_ETH_VF_NUM_MAC_FILTERS) { | |
3003 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n"); | |
3004 | return -EINVAL; | |
3005 | } | |
3006 | ||
3007 | return 0; | |
3008 | } | |
3009 | ||
3010 | static int | |
3011 | qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, | |
3012 | struct qed_vf_info *p_vf, | |
3013 | struct qed_filter_ucast *p_params) | |
3014 | { | |
3015 | int rc = 0; | |
3016 | ||
3017 | if (p_params->type == QED_FILTER_MAC) { | |
3018 | rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); | |
3019 | if (rc) | |
3020 | return rc; | |
3021 | } | |
3022 | ||
3023 | if (p_params->type == QED_FILTER_VLAN) | |
3024 | rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); | |
3025 | ||
3026 | return rc; | |
3027 | } | |
3028 | ||
ba56947a BX |
3029 | static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, |
3030 | int vfid, struct qed_filter_ucast *params) | |
dacd88d6 YM |
3031 | { |
3032 | struct qed_public_vf_info *vf; | |
3033 | ||
3034 | vf = qed_iov_get_public_vf_info(hwfn, vfid, true); | |
3035 | if (!vf) | |
3036 | return -EINVAL; | |
3037 | ||
3038 | /* No real decision to make; Store the configured MAC */ | |
3039 | if (params->type == QED_FILTER_MAC || | |
3040 | params->type == QED_FILTER_MAC_VLAN) | |
3041 | ether_addr_copy(vf->mac, params->mac); | |
3042 | ||
3043 | return 0; | |
3044 | } | |
3045 | ||
3046 | static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, | |
3047 | struct qed_ptt *p_ptt, | |
3048 | struct qed_vf_info *vf) | |
3049 | { | |
08feecd7 | 3050 | struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; |
dacd88d6 YM |
3051 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
3052 | struct vfpf_ucast_filter_tlv *req; | |
3053 | u8 status = PFVF_STATUS_SUCCESS; | |
3054 | struct qed_filter_ucast params; | |
3055 | int rc; | |
3056 | ||
3057 | /* Prepare the unicast filter params */ | |
3058 | memset(¶ms, 0, sizeof(struct qed_filter_ucast)); | |
3059 | req = &mbx->req_virt->ucast_filter; | |
3060 | params.opcode = (enum qed_filter_opcode)req->opcode; | |
3061 | params.type = (enum qed_filter_ucast_type)req->type; | |
3062 | ||
3063 | params.is_rx_filter = 1; | |
3064 | params.is_tx_filter = 1; | |
3065 | params.vport_to_remove_from = vf->vport_id; | |
3066 | params.vport_to_add_to = vf->vport_id; | |
3067 | memcpy(params.mac, req->mac, ETH_ALEN); | |
3068 | params.vlan = req->vlan; | |
3069 | ||
3070 | DP_VERBOSE(p_hwfn, | |
3071 | QED_MSG_IOV, | |
3072 | "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", | |
3073 | vf->abs_vf_id, params.opcode, params.type, | |
3074 | params.is_rx_filter ? "RX" : "", | |
3075 | params.is_tx_filter ? "TX" : "", | |
3076 | params.vport_to_add_to, | |
3077 | params.mac[0], params.mac[1], | |
3078 | params.mac[2], params.mac[3], | |
3079 | params.mac[4], params.mac[5], params.vlan); | |
3080 | ||
3081 | if (!vf->vport_instance) { | |
3082 | DP_VERBOSE(p_hwfn, | |
3083 | QED_MSG_IOV, | |
3084 | "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", | |
3085 | vf->abs_vf_id); | |
3086 | status = PFVF_STATUS_FAILURE; | |
3087 | goto out; | |
3088 | } | |
3089 | ||
08feecd7 YM |
3090 | /* Update shadow copy of the VF configuration */ |
3091 | if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms)) { | |
3092 | status = PFVF_STATUS_FAILURE; | |
3093 | goto out; | |
3094 | } | |
3095 | ||
3096 | /* Determine if the unicast filtering is acceptable by PF */ | 
1a635e48 | 3097 | if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && |
08feecd7 YM |
3098 | (params.type == QED_FILTER_VLAN || |
3099 | params.type == QED_FILTER_MAC_VLAN)) { | |
3100 | /* Once VLAN is forced or PVID is set, do not allow | |
3101 | * to add/replace any further VLANs. | |
3102 | */ | |
3103 | if (params.opcode == QED_FILTER_ADD || | |
3104 | params.opcode == QED_FILTER_REPLACE) | |
3105 | status = PFVF_STATUS_FORCED; | |
3106 | goto out; | |
3107 | } | |
3108 | ||
1a635e48 | 3109 | if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && |
eff16960 YM |
3110 | (params.type == QED_FILTER_MAC || |
3111 | params.type == QED_FILTER_MAC_VLAN)) { | |
3112 | if (!ether_addr_equal(p_bulletin->mac, params.mac) || | |
3113 | (params.opcode != QED_FILTER_ADD && | |
3114 | params.opcode != QED_FILTER_REPLACE)) | |
3115 | status = PFVF_STATUS_FORCED; | |
3116 | goto out; | |
3117 | } | |
3118 | ||
dacd88d6 YM |
3119 | rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); |
3120 | if (rc) { | |
3121 | status = PFVF_STATUS_FAILURE; | |
3122 | goto out; | |
3123 | } | |
3124 | ||
3125 | rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, | |
3126 | QED_SPQ_MODE_CB, NULL); | |
3127 | if (rc) | |
3128 | status = PFVF_STATUS_FAILURE; | |
3129 | ||
3130 | out: | |
3131 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, | |
3132 | sizeof(struct pfvf_def_resp_tlv), status); | |
3133 | } | |
3134 | ||
0b55e27d YM |
3135 | static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, |
3136 | struct qed_ptt *p_ptt, | |
3137 | struct qed_vf_info *vf) | |
3138 | { | |
3139 | int i; | |
3140 | ||
3141 | /* Reset the SBs */ | |
3142 | for (i = 0; i < vf->num_sbs; i++) | |
3143 | qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, | |
3144 | vf->igu_sbs[i], | |
3145 | vf->opaque_fid, false); | |
3146 | ||
3147 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, | |
3148 | sizeof(struct pfvf_def_resp_tlv), | |
3149 | PFVF_STATUS_SUCCESS); | |
3150 | } | |
3151 | ||
3152 | static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, | |
3153 | struct qed_ptt *p_ptt, struct qed_vf_info *vf) | |
3154 | { | |
3155 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
3156 | u8 status = PFVF_STATUS_SUCCESS; | |
3157 | ||
3158 | /* Disable Interrupts for VF */ | |
3159 | qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); | |
3160 | ||
3161 | /* Reset Permission table */ | |
3162 | qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); | |
3163 | ||
3164 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, | |
3165 | length, status); | |
3166 | } | |
3167 | ||
3168 | static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, | |
3169 | struct qed_ptt *p_ptt, | |
3170 | struct qed_vf_info *p_vf) | |
3171 | { | |
3172 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
1fe614d1 YM |
3173 | u8 status = PFVF_STATUS_SUCCESS; |
3174 | int rc = 0; | |
0b55e27d YM |
3175 | |
3176 | qed_iov_vf_cleanup(p_hwfn, p_vf); | |
3177 | ||
1fe614d1 YM |
3178 | if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { |
3179 | /* Stopping the VF */ | |
3180 | rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, | |
3181 | p_vf->opaque_fid); | |
3182 | ||
3183 | if (rc) { | |
3184 | DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", | |
3185 | rc); | |
3186 | status = PFVF_STATUS_FAILURE; | |
3187 | } | |
3188 | ||
3189 | p_vf->state = VF_STOPPED; | |
3190 | } | |
3191 | ||
0b55e27d | 3192 | qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, |
1fe614d1 | 3193 | length, status); |
0b55e27d YM |
3194 | } |
3195 | ||
3196 | static int | |
3197 | qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, | |
3198 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) | |
3199 | { | |
3200 | int cnt; | |
3201 | u32 val; | |
3202 | ||
3203 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); | |
3204 | ||
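| /* Pretend to be the VF and poll its doorbell-queue usage counter; wait | 
| * up to ~1 second (50 polls x 20 msec) for it to drop to zero. | 
| */ | 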
3205 | for (cnt = 0; cnt < 50; cnt++) { | |
3206 | val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); | |
3207 | if (!val) | |
3208 | break; | |
3209 | msleep(20); | |
3210 | } | |
3211 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); | |
3212 | ||
3213 | if (cnt == 50) { | |
3214 | DP_ERR(p_hwfn, | |
3215 | "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", | |
3216 | p_vf->abs_vf_id, val); | |
3217 | return -EBUSY; | |
3218 | } | |
3219 | ||
3220 | return 0; | |
3221 | } | |
3222 | ||
3223 | static int | |
3224 | qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, | |
3225 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) | |
3226 | { | |
3227 | u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS]; | |
3228 | int i, cnt; | |
3229 | ||
3230 | /* Read initial consumers & producers */ | |
3231 | for (i = 0; i < MAX_NUM_VOQS; i++) { | |
3232 | u32 prod; | |
3233 | ||
3234 | cons[i] = qed_rd(p_hwfn, p_ptt, | |
3235 | PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + | |
3236 | i * 0x40); | |
3237 | prod = qed_rd(p_hwfn, p_ptt, | |
3238 | PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + | |
3239 | i * 0x40); | |
3240 | distance[i] = prod - cons[i]; | |
3241 | } | |
3242 | ||
3243 | /* Wait for consumers to pass the producers */ | |
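| /* The unsigned (tmp - cons[i]) arithmetic keeps the distance check | 
| * correct even if the 32-bit counters wrap around. | 
| */ | 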
3244 | i = 0; | |
3245 | for (cnt = 0; cnt < 50; cnt++) { | |
3246 | for (; i < MAX_NUM_VOQS; i++) { | |
3247 | u32 tmp; | |
3248 | ||
3249 | tmp = qed_rd(p_hwfn, p_ptt, | |
3250 | PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + | |
3251 | i * 0x40); | |
3252 | if (distance[i] > tmp - cons[i]) | |
3253 | break; | |
3254 | } | |
3255 | ||
3256 | if (i == MAX_NUM_VOQS) | |
3257 | break; | |
3258 | ||
3259 | msleep(20); | |
3260 | } | |
3261 | ||
3262 | if (cnt == 50) { | |
3263 | DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", | |
3264 | p_vf->abs_vf_id, i); | |
3265 | return -EBUSY; | |
3266 | } | |
3267 | ||
3268 | return 0; | |
3269 | } | |
3270 | ||
3271 | static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, | |
3272 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) | |
3273 | { | |
3274 | int rc; | |
3275 | ||
3276 | rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); | |
3277 | if (rc) | |
3278 | return rc; | |
3279 | ||
3280 | rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); | |
3281 | if (rc) | |
3282 | return rc; | |
3283 | ||
3284 | return 0; | |
3285 | } | |
3286 | ||
3287 | static int | |
3288 | qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, | |
3289 | struct qed_ptt *p_ptt, | |
3290 | u16 rel_vf_id, u32 *ack_vfs) | |
3291 | { | |
3292 | struct qed_vf_info *p_vf; | |
3293 | int rc = 0; | |
3294 | ||
3295 | p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); | |
3296 | if (!p_vf) | |
3297 | return 0; | |
3298 | ||
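| /* pending_flr is a bitmap of relative VF ids (64 per u64 word); only | 
| * VFs whose bit was set by qed_iov_mark_vf_flr() are handled here. | 
| */ | 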
3299 | if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & | |
3300 | (1ULL << (rel_vf_id % 64))) { | |
3301 | u16 vfid = p_vf->abs_vf_id; | |
3302 | ||
3303 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
3304 | "VF[%d] - Handling FLR\n", vfid); | |
3305 | ||
3306 | qed_iov_vf_cleanup(p_hwfn, p_vf); | |
3307 | ||
3308 | /* If VF isn't active, no need for anything but SW */ | |
3309 | if (!p_vf->b_init) | |
3310 | goto cleanup; | |
3311 | ||
3312 | rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); | |
3313 | if (rc) | |
3314 | goto cleanup; | |
3315 | ||
3316 | rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); | |
3317 | if (rc) { | |
3318 | DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid); | 
3319 | return rc; | |
3320 | } | |
3321 | ||
7eff82b0 YM |
3322 | /* Workaround to make VF-PF channel ready, as FW |
3323 | * doesn't do that as a part of FLR. | |
3324 | */ | |
3325 | REG_WR(p_hwfn, | |
3326 | GTT_BAR0_MAP_REG_USDM_RAM + | |
3327 | USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); | |
3328 | ||
0b55e27d YM |
3329 | /* VF_STOPPED has to be set only after final cleanup |
3330 | * but prior to re-enabling the VF. | |
3331 | */ | |
3332 | p_vf->state = VF_STOPPED; | |
3333 | ||
3334 | rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); | |
3335 | if (rc) { | |
3336 | DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n", | 
3337 | vfid); | |
3338 | return rc; | |
3339 | } | |
3340 | cleanup: | |
3341 | /* Mark VF for ack and clean pending state */ | |
3342 | if (p_vf->state == VF_RESET) | |
3343 | p_vf->state = VF_STOPPED; | |
1a635e48 | 3344 | ack_vfs[vfid / 32] |= BIT((vfid % 32)); |
0b55e27d YM |
3345 | p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= |
3346 | ~(1ULL << (rel_vf_id % 64)); | |
fd3c615a | 3347 | p_vf->vf_mbx.b_pending_msg = false; |
0b55e27d YM |
3348 | } |
3349 | ||
3350 | return rc; | |
3351 | } | |
3352 | ||
ba56947a BX |
3353 | static int |
3354 | qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |
0b55e27d YM |
3355 | { |
3356 | u32 ack_vfs[VF_MAX_STATIC / 32]; | |
3357 | int rc = 0; | |
3358 | u16 i; | |
3359 | ||
3360 | memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); | |
3361 | ||
3362 | /* Since BRB <-> PRS interface can't be tested as part of the flr | |
3363 | * polling due to HW limitations, simply sleep a bit. And since | |
3364 | * there's no need to wait per-vf, do it before looping. | |
3365 | */ | |
3366 | msleep(100); | |
3367 | ||
3368 | for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) | |
3369 | qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); | |
3370 | ||
3371 | rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); | |
3372 | return rc; | |
3373 | } | |
3374 | ||
cccf6f5c | 3375 | bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) |
0b55e27d | 3376 | { |
cccf6f5c MY |
3377 | bool found = false; |
3378 | u16 i; | |
0b55e27d YM |
3379 | |
3380 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); | |
3381 | for (i = 0; i < (VF_MAX_STATIC / 32); i++) | |
3382 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
3383 | "[%08x,...,%08x]: %08x\n", | |
3384 | i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); | |
3385 | ||
3386 | if (!p_hwfn->cdev->p_iov_info) { | |
3387 | DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); | |
cccf6f5c | 3388 | return false; |
0b55e27d YM |
3389 | } |
3390 | ||
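| /* p_disabled_vfs is indexed by absolute VF id (32 bits per word), while | 
| * the pending_flr shadow is indexed by relative VF id (64 bits per word). | 
| */ | 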
3391 | /* Mark VFs */ | |
3392 | for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { | |
3393 | struct qed_vf_info *p_vf; | |
3394 | u8 vfid; | |
3395 | ||
3396 | p_vf = qed_iov_get_vf_info(p_hwfn, i, false); | |
3397 | if (!p_vf) | |
3398 | continue; | |
3399 | ||
3400 | vfid = p_vf->abs_vf_id; | |
1a635e48 | 3401 | if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { |
0b55e27d YM |
3402 | u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; |
3403 | u16 rel_vf_id = p_vf->relative_vf_id; | |
3404 | ||
3405 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
3406 | "VF[%d] [rel %d] got FLR-ed\n", | |
3407 | vfid, rel_vf_id); | |
3408 | ||
3409 | p_vf->state = VF_RESET; | |
3410 | ||
3411 | /* No need to lock here, since pending_flr should | |
3412 | * only change here and before ACKing the MFW. Since | 
3413 | * the MFW will not trigger an additional attention for | 
3414 | * VF flr until we ACK, we're safe. | 
3415 | */ | |
3416 | p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); | |
cccf6f5c | 3417 | found = true; |
0b55e27d YM |
3418 | } |
3419 | } | |
3420 | ||
3421 | return found; | |
3422 | } | |
3423 | ||
73390ac9 YM |
3424 | static void qed_iov_get_link(struct qed_hwfn *p_hwfn, |
3425 | u16 vfid, | |
3426 | struct qed_mcp_link_params *p_params, | |
3427 | struct qed_mcp_link_state *p_link, | |
3428 | struct qed_mcp_link_capabilities *p_caps) | |
3429 | { | |
3430 | struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, | |
3431 | vfid, | |
3432 | false); | |
3433 | struct qed_bulletin_content *p_bulletin; | |
3434 | ||
3435 | if (!p_vf) | |
3436 | return; | |
3437 | ||
3438 | p_bulletin = p_vf->bulletin.p_virt; | |
3439 | ||
3440 | if (p_params) | |
3441 | __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); | |
3442 | if (p_link) | |
3443 | __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); | |
3444 | if (p_caps) | |
3445 | __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); | |
3446 | } | |
3447 | ||
37bff2b9 YM |
3448 | static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, |
3449 | struct qed_ptt *p_ptt, int vfid) | |
3450 | { | |
3451 | struct qed_iov_vf_mbx *mbx; | |
3452 | struct qed_vf_info *p_vf; | |
37bff2b9 YM |
3453 | |
3454 | p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
3455 | if (!p_vf) | |
3456 | return; | |
3457 | ||
3458 | mbx = &p_vf->vf_mbx; | |
3459 | ||
3460 | /* qed_iov_process_mbx_request */ | |
fd3c615a MY |
3461 | if (!mbx->b_pending_msg) { |
3462 | DP_NOTICE(p_hwfn, | |
3463 | "VF[%02x]: Trying to process mailbox message when none is pending\n", | |
3464 | p_vf->abs_vf_id); | |
3465 | return; | |
3466 | } | |
3467 | mbx->b_pending_msg = false; | |
37bff2b9 YM |
3468 | |
3469 | mbx->first_tlv = mbx->req_virt->first_tlv; | |
3470 | ||
fd3c615a MY |
3471 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, |
3472 | "VF[%02x]: Processing mailbox message [type %04x]\n", | |
3473 | p_vf->abs_vf_id, mbx->first_tlv.tl.type); | |
3474 | ||
37bff2b9 | 3475 | /* check if tlv type is known */ |
7eff82b0 YM |
3476 | if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && |
3477 | !p_vf->b_malicious) { | |
1408cc1f YM |
3478 | switch (mbx->first_tlv.tl.type) { |
3479 | case CHANNEL_TLV_ACQUIRE: | |
3480 | qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); | |
3481 | break; | |
dacd88d6 YM |
3482 | case CHANNEL_TLV_VPORT_START: |
3483 | qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); | |
3484 | break; | |
3485 | case CHANNEL_TLV_VPORT_TEARDOWN: | |
3486 | qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); | |
3487 | break; | |
3488 | case CHANNEL_TLV_START_RXQ: | |
3489 | qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); | |
3490 | break; | |
3491 | case CHANNEL_TLV_START_TXQ: | |
3492 | qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); | |
3493 | break; | |
3494 | case CHANNEL_TLV_STOP_RXQS: | |
3495 | qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); | |
3496 | break; | |
3497 | case CHANNEL_TLV_STOP_TXQS: | |
3498 | qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); | |
3499 | break; | |
17b235c1 YM |
3500 | case CHANNEL_TLV_UPDATE_RXQ: |
3501 | qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); | |
3502 | break; | |
dacd88d6 YM |
3503 | case CHANNEL_TLV_VPORT_UPDATE: |
3504 | qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); | |
3505 | break; | |
3506 | case CHANNEL_TLV_UCAST_FILTER: | |
3507 | qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); | |
3508 | break; | |
0b55e27d YM |
3509 | case CHANNEL_TLV_CLOSE: |
3510 | qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); | |
3511 | break; | |
3512 | case CHANNEL_TLV_INT_CLEANUP: | |
3513 | qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); | |
3514 | break; | |
3515 | case CHANNEL_TLV_RELEASE: | |
3516 | qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); | |
3517 | break; | |
eaf3c0c6 CM |
3518 | case CHANNEL_TLV_UPDATE_TUNN_PARAM: |
3519 | qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); | |
3520 | break; | |
1408cc1f | 3521 | } |
7eff82b0 YM |
3522 | } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { |
3523 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
3524 | "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", | |
3525 | p_vf->abs_vf_id, mbx->first_tlv.tl.type); | |
3526 | ||
3527 | qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, | |
3528 | mbx->first_tlv.tl.type, | |
3529 | sizeof(struct pfvf_def_resp_tlv), | |
3530 | PFVF_STATUS_MALICIOUS); | |
37bff2b9 YM |
3531 | } else { |
3532 | /* unknown TLV - this may belong to a VF driver from the future | |
3533 | * - a version written after this PF driver was written, which | |
3534 | * supports features unknown as of yet. Too bad since we don't | |
3535 | * support them. Or this may be because someone wrote a crappy | |
3536 | * VF driver and is sending garbage over the channel. | |
3537 | */ | |
54fdd80f YM |
3538 | DP_NOTICE(p_hwfn, |
3539 | "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", | |
3540 | p_vf->abs_vf_id, | |
3541 | mbx->first_tlv.tl.type, | |
3542 | mbx->first_tlv.tl.length, | |
3543 | mbx->first_tlv.padding, mbx->first_tlv.reply_address); | |
3544 | ||
3545 | /* Try replying in case reply address matches the acquisition's | |
3546 | * posted address. | |
3547 | */ | |
3548 | if (p_vf->acquire.first_tlv.reply_address && | |
3549 | (mbx->first_tlv.reply_address == | |
3550 | p_vf->acquire.first_tlv.reply_address)) { | |
3551 | qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, | |
3552 | mbx->first_tlv.tl.type, | |
3553 | sizeof(struct pfvf_def_resp_tlv), | |
3554 | PFVF_STATUS_NOT_SUPPORTED); | |
3555 | } else { | |
37bff2b9 YM |
3556 | DP_VERBOSE(p_hwfn, |
3557 | QED_MSG_IOV, | |
54fdd80f YM |
3558 | "VF[%02x]: Can't respond to TLV - no valid reply address\n", |
3559 | p_vf->abs_vf_id); | |
37bff2b9 YM |
3560 | } |
3561 | } | |
3562 | } | |
3563 | ||
fd3c615a | 3564 | void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) |
37bff2b9 | 3565 | { |
fd3c615a | 3566 | int i; |
37bff2b9 | 3567 | |
fd3c615a | 3568 | memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); |
37bff2b9 | 3569 | |
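| /* Build a bitmap (64 VFs per u64 word) of VFs that currently have a | 
| * mailbox message pending. | 
| */ | 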
fd3c615a MY |
3570 | qed_for_each_vf(p_hwfn, i) { |
3571 | struct qed_vf_info *p_vf; | |
37bff2b9 | 3572 | |
fd3c615a MY |
3573 | p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; |
3574 | if (p_vf->vf_mbx.b_pending_msg) | |
3575 | events[i / 64] |= 1ULL << (i % 64); | |
3576 | } | |
37bff2b9 YM |
3577 | } |
3578 | ||
7eff82b0 YM |
3579 | static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, |
3580 | u16 abs_vfid) | |
37bff2b9 | 3581 | { |
7eff82b0 | 3582 | u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; |
37bff2b9 | 3583 | |
7eff82b0 | 3584 | if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { |
37bff2b9 YM |
3585 | DP_VERBOSE(p_hwfn, |
3586 | QED_MSG_IOV, | |
7eff82b0 | 3587 | "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n", |
37bff2b9 | 3588 | abs_vfid); |
7eff82b0 | 3589 | return NULL; |
37bff2b9 | 3590 | } |
7eff82b0 YM |
3591 | |
3592 | return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; | |
3593 | } | |
3594 | ||
3595 | static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, | |
3596 | u16 abs_vfid, struct regpair *vf_msg) | |
3597 | { | |
3598 | struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, | |
3599 | abs_vfid); | |
3600 | ||
3601 | if (!p_vf) | |
3602 | return 0; | |
37bff2b9 YM |
3603 | |
3604 | /* List the physical address of the request so that handler | |
3605 | * could later on copy the message from it. | |
3606 | */ | |
3607 | p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; | |
3608 | ||
3609 | /* Mark the event and schedule the workqueue */ | |
fd3c615a | 3610 | p_vf->vf_mbx.b_pending_msg = true; |
37bff2b9 YM |
3611 | qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); |
3612 | ||
3613 | return 0; | |
3614 | } | |
3615 | ||
7eff82b0 YM |
3616 | static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, |
3617 | struct malicious_vf_eqe_data *p_data) | |
3618 | { | |
3619 | struct qed_vf_info *p_vf; | |
3620 | ||
3621 | p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); | |
3622 | ||
3623 | if (!p_vf) | |
3624 | return; | |
3625 | ||
e99a21cb MY |
3626 | if (!p_vf->b_malicious) { |
3627 | DP_NOTICE(p_hwfn, | |
3628 | "VF [%d] - Malicious behavior [%02x]\n", | |
3629 | p_vf->abs_vf_id, p_data->err_id); | |
7eff82b0 | 3630 | |
e99a21cb MY |
3631 | p_vf->b_malicious = true; |
3632 | } else { | |
3633 | DP_INFO(p_hwfn, | |
3634 | "VF [%d] - Malicious behavior [%02x]\n", | |
3635 | p_vf->abs_vf_id, p_data->err_id); | |
3636 | } | |
7eff82b0 YM |
3637 | } |
3638 | ||
37bff2b9 YM |
3639 | int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, |
3640 | u8 opcode, __le16 echo, union event_ring_data *data) | |
3641 | { | |
3642 | switch (opcode) { | |
3643 | case COMMON_EVENT_VF_PF_CHANNEL: | |
3644 | return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), | |
3645 | &data->vf_pf_channel.msg_addr); | |
7eff82b0 YM |
3646 | case COMMON_EVENT_MALICIOUS_VF: |
3647 | qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); | |
3648 | return 0; | |
37bff2b9 YM |
3649 | default: |
3650 | DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", | |
3651 | opcode); | |
3652 | return -EINVAL; | |
3653 | } | |
3654 | } | |
3655 | ||
32a47e72 YM |
3656 | u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) |
3657 | { | |
3658 | struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; | |
3659 | u16 i; | |
3660 | ||
3661 | if (!p_iov) | |
3662 | goto out; | |
3663 | ||
3664 | for (i = rel_vf_id; i < p_iov->total_vfs; i++) | |
7eff82b0 | 3665 | if (qed_iov_is_valid_vfid(p_hwfn, i, true, false)) | 
32a47e72 YM |
3666 | return i; |
3667 | ||
3668 | out: | |
3669 | return MAX_NUM_VFS; | |
3670 | } | |
37bff2b9 YM |
3671 | |
3672 | static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, | |
3673 | int vfid) | |
3674 | { | |
3675 | struct qed_dmae_params params; | |
3676 | struct qed_vf_info *vf_info; | |
3677 | ||
3678 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
3679 | if (!vf_info) | |
3680 | return -EINVAL; | |
3681 | ||
3682 | memset(¶ms, 0, sizeof(struct qed_dmae_params)); | |
3683 | params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST; | |
3684 | params.src_vfid = vf_info->abs_vf_id; | |
3685 | ||
3686 | if (qed_dmae_host2host(p_hwfn, ptt, | |
3687 | vf_info->vf_mbx.pending_req, | |
3688 | vf_info->vf_mbx.req_phys, | |
3689 | sizeof(union vfpf_tlvs) / 4, ¶ms)) { | |
3690 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
3691 | "Failed to copy message from VF 0x%02x\n", vfid); | |
3692 | ||
3693 | return -EIO; | |
3694 | } | |
3695 | ||
3696 | return 0; | |
3697 | } | |
3698 | ||
eff16960 YM |
3699 | static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, |
3700 | u8 *mac, int vfid) | |
3701 | { | |
3702 | struct qed_vf_info *vf_info; | |
3703 | u64 feature; | |
3704 | ||
3705 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); | |
3706 | if (!vf_info) { | |
3707 | DP_NOTICE(p_hwfn->cdev, | |
3708 | "Can not set forced MAC, invalid vfid [%d]\n", vfid); | |
3709 | return; | |
3710 | } | |
3711 | ||
7eff82b0 YM |
3712 | if (vf_info->b_malicious) { |
3713 | DP_NOTICE(p_hwfn->cdev, | |
3714 | "Can't set forced MAC to malicious VF [%d]\n", vfid); | |
3715 | return; | |
3716 | } | |
3717 | ||
eff16960 YM |
3718 | feature = 1 << MAC_ADDR_FORCED; |
3719 | memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); | |
3720 | ||
3721 | vf_info->bulletin.p_virt->valid_bitmap |= feature; | |
3722 | /* Forced MAC will disable MAC_ADDR */ | |
1a635e48 | 3723 | vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); |
eff16960 YM |
3724 | |
3725 | qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); | |
3726 | } | |
3727 | ||
ba56947a BX |
3728 | static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, |
3729 | u16 pvid, int vfid) | |
08feecd7 YM |
3730 | { |
3731 | struct qed_vf_info *vf_info; | |
3732 | u64 feature; | |
3733 | ||
3734 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
3735 | if (!vf_info) { | |
3736 | DP_NOTICE(p_hwfn->cdev, | |
3737 | "Can not set forced VLAN, invalid vfid [%d]\n", vfid); | 
3738 | return; | |
3739 | } | |
3740 | ||
7eff82b0 YM |
3741 | if (vf_info->b_malicious) { |
3742 | DP_NOTICE(p_hwfn->cdev, | |
3743 | "Can't set forced vlan to malicious VF [%d]\n", vfid); | |
3744 | return; | |
3745 | } | |
3746 | ||
08feecd7 YM |
3747 | feature = 1 << VLAN_ADDR_FORCED; |
3748 | vf_info->bulletin.p_virt->pvid = pvid; | |
3749 | if (pvid) | |
3750 | vf_info->bulletin.p_virt->valid_bitmap |= feature; | |
3751 | else | |
3752 | vf_info->bulletin.p_virt->valid_bitmap &= ~feature; | |
3753 | ||
3754 | qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); | |
3755 | } | |
3756 | ||
97379f15 CM |
3757 | void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, |
3758 | int vfid, u16 vxlan_port, u16 geneve_port) | |
3759 | { | |
3760 | struct qed_vf_info *vf_info; | |
3761 | ||
3762 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); | |
3763 | if (!vf_info) { | |
3764 | DP_NOTICE(p_hwfn->cdev, | |
3765 | "Can not set udp ports, invalid vfid [%d]\n", vfid); | |
3766 | return; | |
3767 | } | |
3768 | ||
3769 | if (vf_info->b_malicious) { | |
3770 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
3771 | "Can not set udp ports to malicious VF [%d]\n", | |
3772 | vfid); | |
3773 | return; | |
3774 | } | |
3775 | ||
3776 | vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; | |
3777 | vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; | |
3778 | } | |
3779 | ||
6ddc7608 YM |
3780 | static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) |
3781 | { | |
3782 | struct qed_vf_info *p_vf_info; | |
3783 | ||
3784 | p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
3785 | if (!p_vf_info) | |
3786 | return false; | |
3787 | ||
3788 | return !!p_vf_info->vport_instance; | |
3789 | } | |
3790 | ||
ba56947a | 3791 | static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) |
0b55e27d YM |
3792 | { |
3793 | struct qed_vf_info *p_vf_info; | |
3794 | ||
3795 | p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
3796 | if (!p_vf_info) | |
3797 | return true; | |
3798 | ||
3799 | return p_vf_info->state == VF_STOPPED; | |
3800 | } | |
3801 | ||
73390ac9 YM |
3802 | static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) |
3803 | { | |
3804 | struct qed_vf_info *vf_info; | |
3805 | ||
3806 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
3807 | if (!vf_info) | |
3808 | return false; | |
3809 | ||
3810 | return vf_info->spoof_chk; | |
3811 | } | |
3812 | ||
ba56947a | 3813 | static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) |
6ddc7608 YM |
3814 | { |
3815 | struct qed_vf_info *vf; | |
3816 | int rc = -EINVAL; | |
3817 | ||
3818 | if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { | |
3819 | DP_NOTICE(p_hwfn, | |
3820 | "SR-IOV sanity check failed, can't set spoofchk\n"); | |
3821 | goto out; | |
3822 | } | |
3823 | ||
3824 | vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
3825 | if (!vf) | |
3826 | goto out; | |
3827 | ||
3828 | if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { | |
3829 | /* After VF VPORT start PF will configure spoof check */ | |
3830 | vf->req_spoofchk_val = val; | |
3831 | rc = 0; | |
3832 | goto out; | |
3833 | } | |
3834 | ||
3835 | rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); | |
3836 | ||
3837 | out: | |
3838 | return rc; | |
3839 | } | |
3840 | ||
eff16960 YM |
3841 | static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, |
3842 | u16 rel_vf_id) | |
3843 | { | |
3844 | struct qed_vf_info *p_vf; | |
3845 | ||
3846 | p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); | |
3847 | if (!p_vf || !p_vf->bulletin.p_virt) | |
3848 | return NULL; | |
3849 | ||
1a635e48 | 3850 | if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) |
eff16960 YM |
3851 | return NULL; |
3852 | ||
3853 | return p_vf->bulletin.p_virt->mac; | |
3854 | } | |
3855 | ||
ba56947a BX |
3856 | static u16 |
3857 | qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) | |
08feecd7 YM |
3858 | { |
3859 | struct qed_vf_info *p_vf; | |
3860 | ||
3861 | p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); | |
3862 | if (!p_vf || !p_vf->bulletin.p_virt) | |
3863 | return 0; | |
3864 | ||
1a635e48 | 3865 | if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) |
08feecd7 YM |
3866 | return 0; |
3867 | ||
3868 | return p_vf->bulletin.p_virt->pvid; | |
3869 | } | |
3870 | ||
733def6a YM |
3871 | static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, |
3872 | struct qed_ptt *p_ptt, int vfid, int val) | |
3873 | { | |
3874 | struct qed_vf_info *vf; | |
3875 | u8 abs_vp_id = 0; | |
3876 | int rc; | |
3877 | ||
3878 | vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); | |
3879 | if (!vf) | |
3880 | return -EINVAL; | |
3881 | ||
3882 | rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); | |
3883 | if (rc) | |
3884 | return rc; | |
3885 | ||
3886 | return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); | |
3887 | } | |
3888 | ||
ba56947a BX |
3889 | static int |
3890 | qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) | |
733def6a YM |
3891 | { |
3892 | struct qed_vf_info *vf; | |
3893 | u8 vport_id; | |
3894 | int i; | |
3895 | ||
3896 | for_each_hwfn(cdev, i) { | |
3897 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | |
3898 | ||
3899 | if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { | |
3900 | DP_NOTICE(p_hwfn, | |
3901 | "SR-IOV sanity check failed, can't set min rate\n"); | |
3902 | return -EINVAL; | |
3903 | } | |
3904 | } | |
3905 | ||
3906 | vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); | |
3907 | vport_id = vf->vport_id; | |
3908 | ||
3909 | return qed_configure_vport_wfq(cdev, vport_id, rate); | |
3910 | } | |
3911 | ||
73390ac9 YM |
3912 | static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) |
3913 | { | |
3914 | struct qed_wfq_data *vf_vp_wfq; | |
3915 | struct qed_vf_info *vf_info; | |
3916 | ||
3917 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
3918 | if (!vf_info) | |
3919 | return 0; | |
3920 | ||
3921 | vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; | |
3922 | ||
3923 | if (vf_vp_wfq->configured) | |
3924 | return vf_vp_wfq->min_speed; | |
3925 | else | |
3926 | return 0; | |
3927 | } | |
3928 | ||
37bff2b9 YM |
3929 | /** |
3930 | * qed_schedule_iov - schedules IOV task for VF and PF | |
3931 | * @hwfn: hardware function pointer | |
3932 | * @flag: IOV flag for VF/PF | |
3933 | */ | |
3934 | void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) | |
3935 | { | |
3936 | smp_mb__before_atomic(); | |
3937 | set_bit(flag, &hwfn->iov_task_flags); | |
3938 | smp_mb__after_atomic(); | |
3939 | DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); | |
3940 | queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); | |
3941 | } | |
3942 | ||
1408cc1f YM |
3943 | void qed_vf_start_iov_wq(struct qed_dev *cdev) |
3944 | { | |
3945 | int i; | |
3946 | ||
3947 | for_each_hwfn(cdev, i) | |
3948 | queue_delayed_work(cdev->hwfns[i].iov_wq, | |
3949 | &cdev->hwfns[i].iov_task, 0); | |
3950 | } | |
3951 | ||
0b55e27d YM |
3952 | int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) |
3953 | { | |
3954 | int i, j; | |
3955 | ||
3956 | for_each_hwfn(cdev, i) | |
3957 | if (cdev->hwfns[i].iov_wq) | |
3958 | flush_workqueue(cdev->hwfns[i].iov_wq); | |
3959 | ||
3960 | /* Mark VFs for disablement */ | |
3961 | qed_iov_set_vfs_to_disable(cdev, true); | |
3962 | ||
3963 | if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) | |
3964 | pci_disable_sriov(cdev->pdev); | |
3965 | ||
3966 | for_each_hwfn(cdev, i) { | |
3967 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; | |
3968 | struct qed_ptt *ptt = qed_ptt_acquire(hwfn); | |
3969 | ||
3970 | /* Failure to acquire the ptt in 100g creates an odd error | |
3971 | * where the first engine has already released IOV. | 
3972 | */ | |
3973 | if (!ptt) { | |
3974 | DP_ERR(hwfn, "Failed to acquire ptt\n"); | |
3975 | return -EBUSY; | |
3976 | } | |
3977 | ||
733def6a YM |
3978 | /* Clean WFQ db and configure equal weight for all vports */ |
3979 | qed_clean_wfq_db(hwfn, ptt); | |
3980 | ||
0b55e27d YM |
3981 | qed_for_each_vf(hwfn, j) { |
3982 | int k; | |
3983 | ||
7eff82b0 | 3984 | if (!qed_iov_is_valid_vfid(hwfn, j, true, false)) |
0b55e27d YM |
3985 | continue; |
3986 | ||
3987 | /* Wait until VF is disabled before releasing */ | |
3988 | for (k = 0; k < 100; k++) { | |
3989 | if (!qed_iov_is_vf_stopped(hwfn, j)) | |
3990 | msleep(20); | |
3991 | else | |
3992 | break; | |
3993 | } | |
3994 | ||
3995 | if (k < 100) | |
3996 | qed_iov_release_hw_for_vf(&cdev->hwfns[i], | |
3997 | ptt, j); | |
3998 | else | |
3999 | DP_ERR(hwfn, | |
4000 | "Timeout waiting for VF's FLR to end\n"); | |
4001 | } | |
4002 | ||
4003 | qed_ptt_release(hwfn, ptt); | |
4004 | } | |
4005 | ||
4006 | qed_iov_set_vfs_to_disable(cdev, false); | |
4007 | ||
4008 | return 0; | |
4009 | } | |
4010 | ||
3da7a37a MY |
4011 | static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, |
4012 | u16 vfid, | |
4013 | struct qed_iov_vf_init_params *params) | |
4014 | { | |
4015 | u16 base, i; | |
4016 | ||
4017 | /* Since we have an equal resource distribution per-VF, and we assume | |
4018 | * PF has acquired the QED_PF_L2_QUE first queues, we start setting | |
4019 | * sequentially from there. | |
4020 | */ | |
4021 | base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; | |
4022 | ||
4023 | params->rel_vf_id = vfid; | |
4024 | for (i = 0; i < params->num_queues; i++) { | |
4025 | params->req_rx_queue[i] = base + i; | |
4026 | params->req_tx_queue[i] = base + i; | |
4027 | } | |
4028 | } | |
4029 | ||
0b55e27d YM |
4030 | static int qed_sriov_enable(struct qed_dev *cdev, int num) |
4031 | { | |
3da7a37a | 4032 | struct qed_iov_vf_init_params params; |
0b55e27d YM |
4033 | int i, j, rc; |
4034 | ||
4035 | if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { | |
4036 | DP_NOTICE(cdev, "Can start at most %d VFs\n", | |
4037 | RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); | |
4038 | return -EINVAL; | |
4039 | } | |
4040 | ||
3da7a37a MY |
4041 | memset(¶ms, 0, sizeof(params)); |
4042 | ||
0b55e27d YM |
4043 | /* Initialize HW for VF access */ |
4044 | for_each_hwfn(cdev, j) { | |
4045 | struct qed_hwfn *hwfn = &cdev->hwfns[j]; | |
4046 | struct qed_ptt *ptt = qed_ptt_acquire(hwfn); | |
5a1f965a MY |
4047 | |
4048 | /* Make sure not to use more than 16 queues per VF */ | |
3da7a37a MY |
4049 | params.num_queues = min_t(int, |
4050 | FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, | |
4051 | 16); | |
0b55e27d YM |
4052 | |
4053 | if (!ptt) { | |
4054 | DP_ERR(hwfn, "Failed to acquire ptt\n"); | |
4055 | rc = -EBUSY; | |
4056 | goto err; | |
4057 | } | |
4058 | ||
0b55e27d | 4059 | for (i = 0; i < num; i++) { |
7eff82b0 | 4060 | if (!qed_iov_is_valid_vfid(hwfn, i, false, true)) |
0b55e27d YM |
4061 | continue; |
4062 | ||
3da7a37a MY |
4063 | qed_sriov_enable_qid_config(hwfn, i, ¶ms); |
4064 | rc = qed_iov_init_hw_for_vf(hwfn, ptt, ¶ms); | |
0b55e27d YM |
4065 | if (rc) { |
4066 | DP_ERR(cdev, "Failed to enable VF[%d]\n", i); | |
4067 | qed_ptt_release(hwfn, ptt); | |
4068 | goto err; | |
4069 | } | |
4070 | } | |
4071 | ||
4072 | qed_ptt_release(hwfn, ptt); | |
4073 | } | |
4074 | ||
4075 | /* Enable SRIOV PCIe functions */ | |
4076 | rc = pci_enable_sriov(cdev->pdev, num); | |
4077 | if (rc) { | |
4078 | DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); | |
4079 | goto err; | |
4080 | } | |
4081 | ||
4082 | return num; | |
4083 | ||
4084 | err: | |
4085 | qed_sriov_disable(cdev, false); | |
4086 | return rc; | |
4087 | } | |
4088 | ||
4089 | static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) | |
4090 | { | |
4091 | if (!IS_QED_SRIOV(cdev)) { | |
4092 | DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); | |
4093 | return -EOPNOTSUPP; | |
4094 | } | |
4095 | ||
4096 | if (num_vfs_param) | |
4097 | return qed_sriov_enable(cdev, num_vfs_param); | |
4098 | else | |
4099 | return qed_sriov_disable(cdev, true); | |
4100 | } | |
4101 | ||
eff16960 YM |
4102 | static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) |
4103 | { | |
4104 | int i; | |
4105 | ||
4106 | if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { | |
4107 | DP_VERBOSE(cdev, QED_MSG_IOV, | |
4108 | "Cannot set a VF MAC; Sriov is not enabled\n"); | |
4109 | return -EINVAL; | |
4110 | } | |
4111 | ||
7eff82b0 | 4112 | if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { |
eff16960 YM |
4113 | DP_VERBOSE(cdev, QED_MSG_IOV, |
4114 | "Cannot set VF[%d] MAC (VF is not active)\n", vfid); | |
4115 | return -EINVAL; | |
4116 | } | |
4117 | ||
4118 | for_each_hwfn(cdev, i) { | |
4119 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; | |
4120 | struct qed_public_vf_info *vf_info; | |
4121 | ||
4122 | vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); | |
4123 | if (!vf_info) | |
4124 | continue; | |
4125 | ||
4126 | /* Set the forced MAC, and schedule the IOV task */ | |
4127 | ether_addr_copy(vf_info->forced_mac, mac); | |
4128 | qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); | |
4129 | } | |
4130 | ||
4131 | return 0; | |
4132 | } | |
4133 | ||
08feecd7 YM |
4134 | static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) |
4135 | { | |
4136 | int i; | |
4137 | ||
4138 | if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { | |
4139 | DP_VERBOSE(cdev, QED_MSG_IOV, | |
4140 | "Cannot set a VF MAC; Sriov is not enabled\n"); | |
4141 | return -EINVAL; | |
4142 | } | |
4143 | ||
7eff82b0 | 4144 | if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { |
08feecd7 YM |
4145 | DP_VERBOSE(cdev, QED_MSG_IOV, |
4146 | "Cannot set VF[%d] VLAN (VF is not active)\n", vfid); | 
4147 | return -EINVAL; | |
4148 | } | |
4149 | ||
4150 | for_each_hwfn(cdev, i) { | |
4151 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; | |
4152 | struct qed_public_vf_info *vf_info; | |
4153 | ||
4154 | vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); | |
4155 | if (!vf_info) | |
4156 | continue; | |
4157 | ||
4158 | /* Set the forced vlan, and schedule the IOV task */ | |
4159 | vf_info->forced_vlan = vid; | |
4160 | qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); | |
4161 | } | |
4162 | ||
4163 | return 0; | |
4164 | } | |
4165 | ||
73390ac9 YM |
4166 | static int qed_get_vf_config(struct qed_dev *cdev, |
4167 | int vf_id, struct ifla_vf_info *ivi) | |
4168 | { | |
4169 | struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); | |
4170 | struct qed_public_vf_info *vf_info; | |
4171 | struct qed_mcp_link_state link; | |
4172 | u32 tx_rate; | |
4173 | ||
4174 | /* Sanitize request */ | |
4175 | if (IS_VF(cdev)) | |
4176 | return -EINVAL; | |
4177 | ||
7eff82b0 | 4178 | if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) { |
73390ac9 YM |
4179 | DP_VERBOSE(cdev, QED_MSG_IOV, |
4180 | "VF index [%d] isn't active\n", vf_id); | |
4181 | return -EINVAL; | |
4182 | } | |
4183 | ||
4184 | vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); | |
4185 | ||
4186 | qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); | |
4187 | ||
4188 | /* Fill information about VF */ | |
4189 | ivi->vf = vf_id; | |
4190 | ||
4191 | if (is_valid_ether_addr(vf_info->forced_mac)) | |
4192 | ether_addr_copy(ivi->mac, vf_info->forced_mac); | |
4193 | else | |
4194 | ether_addr_copy(ivi->mac, vf_info->mac); | |
4195 | ||
4196 | ivi->vlan = vf_info->forced_vlan; | |
4197 | ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id); | |
4198 | ivi->linkstate = vf_info->link_state; | |
4199 | tx_rate = vf_info->tx_rate; | |
4200 | ivi->max_tx_rate = tx_rate ? tx_rate : link.speed; | |
4201 | ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); | |
4202 | ||
4203 | return 0; | |
4204 | } | |
4205 | ||
36558c3d YM |
4206 | void qed_inform_vf_link_state(struct qed_hwfn *hwfn) |
4207 | { | |
e50728ef | 4208 | struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev); |
36558c3d YM |
4209 | struct qed_mcp_link_capabilities caps; |
4210 | struct qed_mcp_link_params params; | |
4211 | struct qed_mcp_link_state link; | |
4212 | int i; | |
4213 | ||
4214 | if (!hwfn->pf_iov_info) | |
4215 | return; | |
4216 | ||
4217 | /* Update bulletin of all future possible VFs with link configuration */ | |
4218 | for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { | |
733def6a YM |
4219 | struct qed_public_vf_info *vf_info; |
4220 | ||
4221 | vf_info = qed_iov_get_public_vf_info(hwfn, i, false); | |
4222 | if (!vf_info) | |
4223 | continue; | |
4224 | ||
e50728ef MY |
4225 | /* Only hwfn0 is actually interested in the link speed. |
4226 | * But since only it would receive an MFW indication of link, | |
4227 | * need to take configuration from it - otherwise things like | |
4228 | * rate limiting for hwfn1 VF would not work. | |
4229 | */ | |
4230 | memcpy(¶ms, qed_mcp_get_link_params(lead_hwfn), | |
4231 | sizeof(params)); | |
4232 | memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link)); | |
4233 | memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn), | |
36558c3d YM |
4234 | sizeof(caps)); |
4235 | ||
733def6a YM |
4236 | /* Modify link according to the VF's configured link state */ |
4237 | switch (vf_info->link_state) { | |
4238 | case IFLA_VF_LINK_STATE_DISABLE: | |
4239 | link.link_up = false; | |
4240 | break; | |
4241 | case IFLA_VF_LINK_STATE_ENABLE: | |
4242 | link.link_up = true; | |
4243 | /* Set speed according to the maximum supported by HW; | 
4244 | * that is 40G for regular devices and 100G for CMT | |
4245 | * mode devices. | |
4246 | */ | |
4247 | link.speed = (hwfn->cdev->num_hwfns > 1) ? | |
4248 | 100000 : 40000; | |
4249 | default: | |
4250 | /* In auto mode pass PF link image to VF */ | |
4251 | break; | |
4252 | } | |
4253 | ||
4254 | if (link.link_up && vf_info->tx_rate) { | |
4255 | struct qed_ptt *ptt; | |
4256 | int rate; | |
4257 | ||
4258 | rate = min_t(int, vf_info->tx_rate, link.speed); | |
4259 | ||
4260 | ptt = qed_ptt_acquire(hwfn); | |
4261 | if (!ptt) { | |
4262 | DP_NOTICE(hwfn, "Failed to acquire PTT\n"); | |
4263 | return; | |
4264 | } | |
4265 | ||
4266 | if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { | |
4267 | vf_info->tx_rate = rate; | |
4268 | link.speed = rate; | |
4269 | } | |
4270 | ||
4271 | qed_ptt_release(hwfn, ptt); | |
4272 | } | |
4273 | ||
36558c3d YM |
4274 | qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); |
4275 | } | |
4276 | ||
4277 | qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); | |
4278 | } | |
4279 | ||
733def6a YM |
4280 | static int qed_set_vf_link_state(struct qed_dev *cdev, |
4281 | int vf_id, int link_state) | |
4282 | { | |
4283 | int i; | |
4284 | ||
4285 | /* Sanitize request */ | |
4286 | if (IS_VF(cdev)) | |
4287 | return -EINVAL; | |
4288 | ||
7eff82b0 | 4289 | if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) { |
733def6a YM |
4290 | DP_VERBOSE(cdev, QED_MSG_IOV, |
4291 | "VF index [%d] isn't active\n", vf_id); | |
4292 | return -EINVAL; | |
4293 | } | |
4294 | ||
4295 | /* Handle configuration of link state */ | |
4296 | for_each_hwfn(cdev, i) { | |
4297 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; | |
4298 | struct qed_public_vf_info *vf; | |
4299 | ||
4300 | vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); | |
4301 | if (!vf) | |
4302 | continue; | |
4303 | ||
4304 | if (vf->link_state == link_state) | |
4305 | continue; | |
4306 | ||
4307 | vf->link_state = link_state; | |
4308 | qed_inform_vf_link_state(&cdev->hwfns[i]); | |
4309 | } | |
4310 | ||
4311 | return 0; | |
4312 | } | |
4313 | ||
6ddc7608 YM |
4314 | static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) |
4315 | { | |
4316 | int i, rc = -EINVAL; | |
4317 | ||
4318 | for_each_hwfn(cdev, i) { | |
4319 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | |
4320 | ||
4321 | rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); | |
4322 | if (rc) | |
4323 | break; | |
4324 | } | |
4325 | ||
4326 | return rc; | |
4327 | } | |
4328 | ||
733def6a YM |
4329 | static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) |
4330 | { | |
4331 | int i; | |
4332 | ||
4333 | for_each_hwfn(cdev, i) { | |
4334 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | |
4335 | struct qed_public_vf_info *vf; | |
4336 | ||
4337 | if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { | |
4338 | DP_NOTICE(p_hwfn, | |
4339 | "SR-IOV sanity check failed, can't set tx rate\n"); | |
4340 | return -EINVAL; | |
4341 | } | |
4342 | ||
4343 | vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); | |
4344 | ||
4345 | vf->tx_rate = rate; | |
4346 | ||
4347 | qed_inform_vf_link_state(p_hwfn); | |
4348 | } | |
4349 | ||
4350 | return 0; | |
4351 | } | |
4352 | ||
4353 | static int qed_set_vf_rate(struct qed_dev *cdev, | |
4354 | int vfid, u32 min_rate, u32 max_rate) | |
4355 | { | |
4356 | int rc_min = 0, rc_max = 0; | |
4357 | ||
4358 | if (max_rate) | |
4359 | rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); | |
4360 | ||
4361 | if (min_rate) | |
4362 | rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); | |
4363 | ||
4364 | if (rc_max | rc_min) | |
4365 | return -EINVAL; | |
4366 | ||
4367 | return 0; | |
4368 | } | |
4369 | ||
f990c82c MY |
4370 | static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) |
4371 | { | |
4372 | int i; | |
4373 | ||
4374 | for_each_hwfn(cdev, i) { | |
4375 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; | |
4376 | struct qed_public_vf_info *vf; | |
4377 | ||
4378 | if (!qed_iov_pf_sanity_check(hwfn, vfid)) { | |
4379 | DP_NOTICE(hwfn, | |
4380 | "SR-IOV sanity check failed, can't set trust\n"); | |
4381 | return -EINVAL; | |
4382 | } | |
4383 | ||
4384 | vf = qed_iov_get_public_vf_info(hwfn, vfid, true); | |
4385 | ||
4386 | if (vf->is_trusted_request == trust) | |
4387 | return 0; | |
4388 | vf->is_trusted_request = trust; | |
4389 | ||
4390 | qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); | |
4391 | } | |
4392 | ||
4393 | return 0; | |
4394 | } | |
4395 | ||
37bff2b9 YM |
4396 | static void qed_handle_vf_msg(struct qed_hwfn *hwfn) |
4397 | { | |
4398 | u64 events[QED_VF_ARRAY_LENGTH]; | |
4399 | struct qed_ptt *ptt; | |
4400 | int i; | |
4401 | ||
4402 | ptt = qed_ptt_acquire(hwfn); | |
4403 | if (!ptt) { | |
4404 | DP_VERBOSE(hwfn, QED_MSG_IOV, | |
4405 | "Can't acquire PTT; re-scheduling\n"); | |
4406 | qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); | |
4407 | return; | |
4408 | } | |
4409 | ||
fd3c615a | 4410 | qed_iov_pf_get_pending_events(hwfn, events); |
37bff2b9 YM |
4411 | |
4412 | DP_VERBOSE(hwfn, QED_MSG_IOV, | |
4413 | "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", | |
4414 | events[0], events[1], events[2]); | |
4415 | ||
4416 | qed_for_each_vf(hwfn, i) { | |
4417 | /* Skip VFs with no pending messages */ | |
4418 | if (!(events[i / 64] & (1ULL << (i % 64)))) | |
4419 | continue; | |
4420 | ||
4421 | DP_VERBOSE(hwfn, QED_MSG_IOV, | |
4422 | "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", | |
4423 | i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); | |
4424 | ||
4425 | /* Copy VF's message to PF's request buffer for that VF */ | |
4426 | if (qed_iov_copy_vf_msg(hwfn, ptt, i)) | |
4427 | continue; | |
4428 | ||
4429 | qed_iov_process_mbx_req(hwfn, ptt, i); | |
4430 | } | |
4431 | ||
4432 | qed_ptt_release(hwfn, ptt); | |
4433 | } | |
4434 | ||
08feecd7 YM |
4435 | static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) |
4436 | { | |
4437 | int i; | |
4438 | ||
4439 | qed_for_each_vf(hwfn, i) { | |
4440 | struct qed_public_vf_info *info; | |
4441 | bool update = false; | |
eff16960 | 4442 | u8 *mac; |
08feecd7 YM |
4443 | |
4444 | info = qed_iov_get_public_vf_info(hwfn, i, true); | |
4445 | if (!info) | |
4446 | continue; | |
4447 | ||
4448 | /* Update data on bulletin board */ | |
eff16960 YM |
4449 | mac = qed_iov_bulletin_get_forced_mac(hwfn, i); |
4450 | if (is_valid_ether_addr(info->forced_mac) && | |
4451 | (!mac || !ether_addr_equal(mac, info->forced_mac))) { | |
4452 | DP_VERBOSE(hwfn, | |
4453 | QED_MSG_IOV, | |
4454 | "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", | |
4455 | i, | |
4456 | hwfn->cdev->p_iov_info->first_vf_in_pf + i); | |
4457 | ||
4458 | /* Update bulletin board with forced MAC */ | |
4459 | qed_iov_bulletin_set_forced_mac(hwfn, | |
4460 | info->forced_mac, i); | |
4461 | update = true; | |
4462 | } | |
08feecd7 YM |
4463 | |
4464 | if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ | |
4465 | info->forced_vlan) { | |
4466 | DP_VERBOSE(hwfn, | |
4467 | QED_MSG_IOV, | |
4468 | "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", | |
4469 | info->forced_vlan, | |
4470 | i, | |
4471 | hwfn->cdev->p_iov_info->first_vf_in_pf + i); | |
4472 | qed_iov_bulletin_set_forced_vlan(hwfn, | |
4473 | info->forced_vlan, i); | |
4474 | update = true; | |
4475 | } | |
4476 | ||
4477 | if (update) | |
4478 | qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); | |
4479 | } | |
4480 | } | |
4481 | ||
36558c3d YM |
4482 | static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) |
4483 | { | |
4484 | struct qed_ptt *ptt; | |
4485 | int i; | |
4486 | ||
4487 | ptt = qed_ptt_acquire(hwfn); | |
4488 | if (!ptt) { | |
4489 | DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); | |
4490 | qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); | |
4491 | return; | |
4492 | } | |
4493 | ||
4494 | qed_for_each_vf(hwfn, i) | |
4495 | qed_iov_post_vf_bulletin(hwfn, i, ptt); | |
4496 | ||
4497 | qed_ptt_release(hwfn, ptt); | |
4498 | } | |
4499 | ||
f990c82c MY |
4500 | static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) |
4501 | { | |
4502 | struct qed_sp_vport_update_params params; | |
4503 | struct qed_filter_accept_flags *flags; | |
4504 | struct qed_public_vf_info *vf_info; | |
4505 | struct qed_vf_info *vf; | |
4506 | u8 mask; | |
4507 | int i; | |
4508 | ||
4509 | mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; | |
4510 | flags = ¶ms.accept_flags; | |
4511 | ||
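| /* Only VFs configured as trusted may keep the unmatched (promiscuous) | 
| * unicast/multicast accept bits; they are stripped below for untrusted | 
| * VFs. | 
| */ | 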
4512 | qed_for_each_vf(hwfn, i) { | |
4513 | /* Need to make sure the currently requested configuration didn't | 
4514 | * flip, so that we don't end up configuring something that's not | 
4515 | * needed. | |
4516 | */ | |
4517 | vf_info = qed_iov_get_public_vf_info(hwfn, i, true); | |
4518 | if (vf_info->is_trusted_configured == | |
4519 | vf_info->is_trusted_request) | |
4520 | continue; | |
4521 | vf_info->is_trusted_configured = vf_info->is_trusted_request; | |
4522 | ||
4523 | /* Validate that the VF has a configured vport */ | |
4524 | vf = qed_iov_get_vf_info(hwfn, i, true); | |
4525 | if (!vf->vport_instance) | |
4526 | continue; | |
4527 | ||
4528 | memset(¶ms, 0, sizeof(params)); | |
4529 | params.opaque_fid = vf->opaque_fid; | |
4530 | params.vport_id = vf->vport_id; | |
4531 | ||
4532 | if (vf_info->rx_accept_mode & mask) { | |
4533 | flags->update_rx_mode_config = 1; | |
4534 | flags->rx_accept_filter = vf_info->rx_accept_mode; | |
4535 | } | |
4536 | ||
4537 | if (vf_info->tx_accept_mode & mask) { | |
4538 | flags->update_tx_mode_config = 1; | |
4539 | flags->tx_accept_filter = vf_info->tx_accept_mode; | |
4540 | } | |
4541 | ||
4542 | /* Untrusted VFs must not accept unmatched frames - clear the mask bits */ | |
4543 | if (!vf_info->is_trusted_configured) { | |
4544 | flags->rx_accept_filter &= ~mask; | |
4545 | flags->tx_accept_filter &= ~mask; | |
4546 | } | |
4547 | ||
4548 | if (flags->update_rx_mode_config || | |
4549 | flags->update_tx_mode_config) | |
4550 | qed_sp_vport_update(hwfn, ¶ms, | |
4551 | QED_SPQ_MODE_EBLOCK, NULL); | |
4552 | } | |
4553 | } | |
4554 | ||
ba56947a BX |
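/* PF IOV worker - runs from the per-hwfn IOV workqueue and services
 * whatever events were scheduled: stop requests, VF FLR cleanup, VF
 * mailbox messages, PF-set unicast updates, bulletin posts and trust
 * changes.
 */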
4555 | static void qed_iov_pf_task(struct work_struct *work) | |
37bff2b9 YM |
4557 | { |
4558 | struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, | |
4559 | iov_task.work); | |
0b55e27d | 4560 | int rc; |
37bff2b9 YM |
4561 | |
4562 | if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) | |
4563 | return; | |
4564 | ||
0b55e27d YM |
4565 | if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { |
4566 | struct qed_ptt *ptt = qed_ptt_acquire(hwfn); | |
4567 | ||
4568 | if (!ptt) { | |
4569 | qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); | |
4570 | return; | |
4571 | } | |
4572 | ||
4573 | rc = qed_iov_vf_flr_cleanup(hwfn, ptt); | |
4574 | if (rc) | |
4575 | qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); | |
4576 | ||
4577 | qed_ptt_release(hwfn, ptt); | |
4578 | } | |
4579 | ||
37bff2b9 YM |
4580 | if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) |
4581 | qed_handle_vf_msg(hwfn); | |
08feecd7 YM |
4582 | |
4583 | if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, | |
4584 | &hwfn->iov_task_flags)) | |
4585 | qed_handle_pf_set_vf_unicast(hwfn); | |
4586 | ||
36558c3d YM |
4587 | if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, |
4588 | &hwfn->iov_task_flags)) | |
4589 | qed_handle_bulletin_post(hwfn); | |
f990c82c MY |
4590 | |
4591 | if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) | |
4592 | qed_iov_handle_trust_change(hwfn); | |
37bff2b9 YM |
4593 | } |
4594 | ||
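/* Tear down the per-hwfn IOV workqueues. When schedule_first is set,
 * raise the STOP_WQ flag first so that an already queued task bails out
 * early, then cancel, flush and destroy the workqueue.
 */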
4595 | void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) | |
4596 | { | |
4597 | int i; | |
4598 | ||
4599 | for_each_hwfn(cdev, i) { | |
4600 | if (!cdev->hwfns[i].iov_wq) | |
4601 | continue; | |
4602 | ||
4603 | if (schedule_first) { | |
4604 | qed_schedule_iov(&cdev->hwfns[i], | |
4605 | QED_IOV_WQ_STOP_WQ_FLAG); | |
4606 | cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); | |
4607 | } | |
4608 | ||
4609 | flush_workqueue(cdev->hwfns[i].iov_wq); | |
4610 | destroy_workqueue(cdev->hwfns[i].iov_wq); | |
4611 | } | |
4612 | } | |
4613 | ||
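/* Create one single-threaded IOV workqueue per hwfn - for PFs only when
 * SR-IOV is supported, for VFs always - and attach the matching PF or VF
 * task handler.
 */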
4614 | int qed_iov_wq_start(struct qed_dev *cdev) | |
4615 | { | |
4616 | char name[NAME_SIZE]; | |
4617 | int i; | |
4618 | ||
4619 | for_each_hwfn(cdev, i) { | |
4620 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | |
4621 | ||
36558c3d YM |
4622 | /* PFs need a dedicated workqueue only if they support IOV. | |
4623 | * VFs always require one. | |
4624 | */ | |
4625 | if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) | |
37bff2b9 YM |
4626 | continue; |
4627 | ||
4628 | snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", | |
4629 | cdev->pdev->bus->number, | |
4630 | PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); | |
4631 | ||
4632 | p_hwfn->iov_wq = create_singlethread_workqueue(name); | |
4633 | if (!p_hwfn->iov_wq) { | |
4634 | DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); | |
4635 | return -ENOMEM; | |
4636 | } | |
4637 | ||
36558c3d YM |
4638 | if (IS_PF(cdev)) |
4639 | INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); | |
4640 | else | |
4641 | INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); | |
37bff2b9 YM |
4642 | } |
4643 | ||
4644 | return 0; | |
4645 | } | |
0b55e27d YM |
4646 | |
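/* Hypervisor-side (PF) SR-IOV callbacks, exposed to the protocol driver
 * through the qed_iov_if interface.
 */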
4647 | const struct qed_iov_hv_ops qed_iov_ops_pass = { | |
4648 | .configure = &qed_sriov_configure, | |
eff16960 | 4649 | .set_mac = &qed_sriov_pf_set_mac, |
08feecd7 | 4650 | .set_vlan = &qed_sriov_pf_set_vlan, |
73390ac9 | 4651 | .get_config = &qed_get_vf_config, |
733def6a | 4652 | .set_link_state = &qed_set_vf_link_state, |
6ddc7608 | 4653 | .set_spoof = &qed_spoof_configure, |
733def6a | 4654 | .set_rate = &qed_set_vf_rate, |
f990c82c | 4655 | .set_trust = &qed_set_vf_trust, |
0b55e27d | 4656 | }; |
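/* Illustrative sketch only, not part of this driver: a PF protocol
 * driver that obtained these ops through the qed_iov_if interface could
 * dispatch an ndo_set_vf_trust request roughly as below; the local
 * structure and field names (example_dev, iov_ops, cdev) are assumptions
 * for the sake of the example:
 *
 *	static int example_set_vf_trust(struct net_device *dev, int vfidx,
 *					bool setting)
 *	{
 *		struct example_dev *edev = netdev_priv(dev);
 *
 *		if (!edev->iov_ops)
 *			return -EINVAL;
 *
 *		return edev->iov_ops->set_trust(edev->cdev, vfidx, setting);
 *	}
 *
 * The call lands in qed_set_vf_trust(), which schedules
 * QED_IOV_WQ_TRUST_FLAG so that qed_iov_handle_trust_change() applies the
 * new setting from the IOV workqueue.
 */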