qed: IOV configure and FLR
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#include <linux/qed/qed_iov_if.h>
10#include "qed_cxt.h"
11#include "qed_hsi.h"
12#include "qed_hw.h"
13#include "qed_init_ops.h"
14#include "qed_int.h"
15#include "qed_mcp.h"
16#include "qed_reg_addr.h"
17#include "qed_sp.h"
18#include "qed_sriov.h"
19#include "qed_vf.h"
20
21/* IOV ramrods */
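/* Post a VF_START ramrod on the slowpath queue so the firmware creates
 * the function context for this VF (ETH personality).
 */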
22static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
23 u32 concrete_vfid, u16 opaque_vfid)
24{
25 struct vf_start_ramrod_data *p_ramrod = NULL;
26 struct qed_spq_entry *p_ent = NULL;
27 struct qed_sp_init_data init_data;
28 int rc = -EINVAL;
29
30 /* Get SPQ entry */
31 memset(&init_data, 0, sizeof(init_data));
32 init_data.cid = qed_spq_get_cid(p_hwfn);
33 init_data.opaque_fid = opaque_vfid;
34 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
35
36 rc = qed_sp_init_request(p_hwfn, &p_ent,
37 COMMON_RAMROD_VF_START,
38 PROTOCOLID_COMMON, &init_data);
39 if (rc)
40 return rc;
41
42 p_ramrod = &p_ent->ramrod.vf_start;
43
44 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
45 p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
46
47 p_ramrod->personality = PERSONALITY_ETH;
48
49 return qed_spq_post(p_hwfn, p_ent, NULL);
50}
51
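/* Post a VF_STOP ramrod so the firmware releases the VF's function
 * context.
 */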
52static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
53 u32 concrete_vfid, u16 opaque_vfid)
54{
55 struct vf_stop_ramrod_data *p_ramrod = NULL;
56 struct qed_spq_entry *p_ent = NULL;
57 struct qed_sp_init_data init_data;
58 int rc = -EINVAL;
59
60 /* Get SPQ entry */
61 memset(&init_data, 0, sizeof(init_data));
62 init_data.cid = qed_spq_get_cid(p_hwfn);
63 init_data.opaque_fid = opaque_vfid;
64 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
65
66 rc = qed_sp_init_request(p_hwfn, &p_ent,
67 COMMON_RAMROD_VF_STOP,
68 PROTOCOLID_COMMON, &init_data);
69 if (rc)
70 return rc;
71
72 p_ramrod = &p_ent->ramrod.vf_stop;
73
74 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
75
76 return qed_spq_post(p_hwfn, p_ent, NULL);
77}
78
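/* Sanity-check a relative VF index against the PF's IOV database; with
 * b_enabled_only set, the VF must also have been initialized.
 */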
79bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
80 int rel_vf_id, bool b_enabled_only)
81{
82 if (!p_hwfn->pf_iov_info) {
83 DP_NOTICE(p_hwfn->cdev, "No iov info\n");
84 return false;
85 }
86
87 if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
88 (rel_vf_id < 0))
89 return false;
90
91 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
92 b_enabled_only)
93 return false;
94
95 return true;
96}
97
98static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
99 u16 relative_vf_id,
100 bool b_enabled_only)
101{
102 struct qed_vf_info *vf = NULL;
103
104 if (!p_hwfn->pf_iov_info) {
105 DP_NOTICE(p_hwfn->cdev, "No iov info\n");
106 return NULL;
107 }
108
109 if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
110 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
111 else
112 DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
113 relative_vf_id);
114
115 return vf;
116}
117
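/* Cache the SR-IOV capability fields (totals, VF offset/stride, device id,
 * page size, etc.) from PCI config space into cdev->p_iov_info.
 */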
118static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
119{
120 struct qed_hw_sriov_info *iov = cdev->p_iov_info;
121 int pos = iov->pos;
122
123 DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
124 pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
125
126 pci_read_config_word(cdev->pdev,
127 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
128 pci_read_config_word(cdev->pdev,
129 pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
130
131 pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
132 if (iov->num_vfs) {
133 DP_VERBOSE(cdev,
134 QED_MSG_IOV,
135 "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
136 iov->num_vfs = 0;
137 }
138
139 pci_read_config_word(cdev->pdev,
140 pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
141
142 pci_read_config_word(cdev->pdev,
143 pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
144
145 pci_read_config_word(cdev->pdev,
146 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
147
148 pci_read_config_dword(cdev->pdev,
149 pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
150
151 pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
152
153 pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
154
155 DP_VERBOSE(cdev,
156 QED_MSG_IOV,
157 "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
158 iov->nres,
159 iov->cap,
160 iov->ctrl,
161 iov->total_vfs,
162 iov->initial_vfs,
163 iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
164
165 /* Some sanity checks */
166 if (iov->num_vfs > NUM_OF_VFS(cdev) ||
167 iov->total_vfs > NUM_OF_VFS(cdev)) {
168 /* This can happen only due to a bug. In this case we set
169 * num_vfs to zero to avoid memory corruption in the code that
170 * assumes max number of vfs
171 */
172 DP_NOTICE(cdev,
173 "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
174 iov->num_vfs);
175
176 iov->num_vfs = 0;
177 iov->total_vfs = 0;
178 }
179
180 return 0;
181}
182
183static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
184 struct qed_ptt *p_ptt)
185{
186 struct qed_igu_block *p_sb;
187 u16 sb_id;
188 u32 val;
189
190 if (!p_hwfn->hw_info.p_igu_info) {
191 DP_ERR(p_hwfn,
192 "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
193 return;
194 }
195
196 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
197 sb_id++) {
198 p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
199 if ((p_sb->status & QED_IGU_STATUS_FREE) &&
200 !(p_sb->status & QED_IGU_STATUS_PF)) {
201 val = qed_rd(p_hwfn, p_ptt,
202 IGU_REG_MAPPING_MEMORY + sb_id * 4);
203 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
204 qed_wr(p_hwfn, p_ptt,
205 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
206 }
207 }
208}
209
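/* Populate the per-VF database: point each VF at its slice of the mailbox
 * request/reply buffers and bulletin board, and derive its relative,
 * absolute, concrete and opaque IDs plus its vport.
 */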
210static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
211{
212 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
213 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
214 struct qed_bulletin_content *p_bulletin_virt;
215 dma_addr_t req_p, rply_p, bulletin_p;
216 union pfvf_tlvs *p_reply_virt_addr;
217 union vfpf_tlvs *p_req_virt_addr;
218 u8 idx = 0;
219
220 memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
221
222 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
223 req_p = p_iov_info->mbx_msg_phys_addr;
224 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
225 rply_p = p_iov_info->mbx_reply_phys_addr;
226 p_bulletin_virt = p_iov_info->p_bulletins;
227 bulletin_p = p_iov_info->bulletins_phys;
228 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
229 DP_ERR(p_hwfn,
230 "qed_iov_setup_vfdb called without allocating mem first\n");
231 return;
232 }
233
234 for (idx = 0; idx < p_iov->total_vfs; idx++) {
235 struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
236 u32 concrete;
237
238 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
239 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
240 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
241 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
242
243 vf->state = VF_STOPPED;
244 vf->b_init = false;
245
246 vf->bulletin.phys = idx *
247 sizeof(struct qed_bulletin_content) +
248 bulletin_p;
249 vf->bulletin.p_virt = p_bulletin_virt + idx;
250 vf->bulletin.size = sizeof(struct qed_bulletin_content);
251
252 vf->relative_vf_id = idx;
253 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
254 concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
255 vf->concrete_fid = concrete;
256 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
257 (vf->abs_vf_id << 8);
258 vf->vport_id = idx + 1;
259 }
260}
261
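/* Allocate the DMA-coherent areas shared with VFs - mailbox requests,
 * mailbox replies and bulletin boards - sized for the total VF count.
 */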
262static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
263{
264 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
265 void **p_v_addr;
266 u16 num_vfs = 0;
267
268 num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
269
270 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
271 "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
272
273 /* Allocate PF Mailbox buffer (per-VF) */
274 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
275 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
276 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
277 p_iov_info->mbx_msg_size,
278 &p_iov_info->mbx_msg_phys_addr,
279 GFP_KERNEL);
280 if (!*p_v_addr)
281 return -ENOMEM;
282
283 /* Allocate PF Mailbox Reply buffer (per-VF) */
284 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
285 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
286 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
287 p_iov_info->mbx_reply_size,
288 &p_iov_info->mbx_reply_phys_addr,
289 GFP_KERNEL);
290 if (!*p_v_addr)
291 return -ENOMEM;
292
293 p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
294 num_vfs;
295 p_v_addr = &p_iov_info->p_bulletins;
296 *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
297 p_iov_info->bulletins_size,
298 &p_iov_info->bulletins_phys,
299 GFP_KERNEL);
300 if (!*p_v_addr)
301 return -ENOMEM;
302
303 DP_VERBOSE(p_hwfn,
304 QED_MSG_IOV,
305 "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
306 p_iov_info->mbx_msg_virt_addr,
307 (u64) p_iov_info->mbx_msg_phys_addr,
308 p_iov_info->mbx_reply_virt_addr,
309 (u64) p_iov_info->mbx_reply_phys_addr,
310 p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
311
312 return 0;
313}
314
315static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
316{
317 struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
318
319 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
320 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
321 p_iov_info->mbx_msg_size,
322 p_iov_info->mbx_msg_virt_addr,
323 p_iov_info->mbx_msg_phys_addr);
324
325 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
326 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
327 p_iov_info->mbx_reply_size,
328 p_iov_info->mbx_reply_virt_addr,
329 p_iov_info->mbx_reply_phys_addr);
330
331 if (p_iov_info->p_bulletins)
332 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
333 p_iov_info->bulletins_size,
334 p_iov_info->p_bulletins,
335 p_iov_info->bulletins_phys);
336}
337
338int qed_iov_alloc(struct qed_hwfn *p_hwfn)
339{
340 struct qed_pf_iov *p_sriov;
341
342 if (!IS_PF_SRIOV(p_hwfn)) {
343 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
344 "No SR-IOV - no need for IOV db\n");
345 return 0;
346 }
347
348 p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
349 if (!p_sriov) {
350 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
351 return -ENOMEM;
352 }
353
354 p_hwfn->pf_iov_info = p_sriov;
355
356 return qed_iov_allocate_vfdb(p_hwfn);
357}
358
359void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
360{
361 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
362 return;
363
364 qed_iov_setup_vfdb(p_hwfn);
365 qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
366}
367
368void qed_iov_free(struct qed_hwfn *p_hwfn)
369{
370 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
371 qed_iov_free_vfdb(p_hwfn);
372 kfree(p_hwfn->pf_iov_info);
373 }
374}
375
376void qed_iov_free_hw_info(struct qed_dev *cdev)
377{
378 kfree(cdev->p_iov_info);
379 cdev->p_iov_info = NULL;
380}
381
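/* Learn SR-IOV support from the PCI capability and compute the index of
 * the first VF that belongs to this PF.
 */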
382int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
383{
384 struct qed_dev *cdev = p_hwfn->cdev;
385 int pos;
386 int rc;
387
388 if (IS_VF(p_hwfn->cdev))
389 return 0;
390
391 /* Learn the PCI configuration */
392 pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
393 PCI_EXT_CAP_ID_SRIOV);
394 if (!pos) {
395 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
396 return 0;
397 }
398
399 /* Allocate a new struct for IOV information */
400 cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
401 if (!cdev->p_iov_info) {
402 DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
403 return -ENOMEM;
404 }
405 cdev->p_iov_info->pos = pos;
406
407 rc = qed_iov_pci_cfg_info(cdev);
408 if (rc)
409 return rc;
410
411 /* We want PF IOV to be synonymous with the existence of p_iov_info;
412 * In case the capability is published but there are no VFs, simply
413 * de-allocate the struct.
414 */
415 if (!cdev->p_iov_info->total_vfs) {
416 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
417 "IOV capabilities, but no VFs are published\n");
418 kfree(cdev->p_iov_info);
419 cdev->p_iov_info = NULL;
420 return 0;
421 }
422
423 /* Calculate the first VF index - this is a bit tricky; Basically,
424 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
425 * after the first engine's VFs.
426 */
427 cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
428 p_hwfn->abs_pf_id - 16;
429 if (QED_PATH_ID(p_hwfn))
430 cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
431
432 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
433 "First VF in hwfn 0x%08x\n",
434 cdev->p_iov_info->first_vf_in_pf);
435
436 return 0;
437}
438
439static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
440{
441 /* Check PF supports sriov */
442 if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
443 return false;
444
445 /* Check VF validity */
446 if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
447 !IS_PF_SRIOV_ALLOC(p_hwfn))
448 return false;
449
450 return true;
451}
452
453static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
454 u16 rel_vf_id, u8 to_disable)
455{
456 struct qed_vf_info *vf;
457 int i;
458
459 for_each_hwfn(cdev, i) {
460 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
461
462 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
463 if (!vf)
464 continue;
465
466 vf->to_disable = to_disable;
467 }
468}
469
470void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
471{
472 u16 i;
473
474 if (!IS_QED_SRIOV(cdev))
475 return;
476
477 for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
478 qed_iov_set_vf_to_disable(cdev, i, to_disable);
479}
480
481static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
482 struct qed_ptt *p_ptt, u8 abs_vfid)
483{
484 qed_wr(p_hwfn, p_ptt,
485 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
486 1 << (abs_vfid & 0x1f));
487}
488
489static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
490 struct qed_ptt *p_ptt,
491 struct qed_vf_info *vf, bool enable)
492{
493 u32 igu_vf_conf;
494
495 qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
496
497 igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
498
499 if (enable)
500 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
501 else
502 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
503
504 qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
505
506 /* unpretend */
507 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
508}
509
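/* Give a VF access to the device: clear PGLUE errors, let the MFW set up
 * its MSI-X table, run the VF init phase under the VF's FID and finally
 * send the VF_START ramrod.
 */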
510static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
511 struct qed_ptt *p_ptt,
512 struct qed_vf_info *vf)
513{
514 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
515 int rc;
516
517 if (vf->to_disable)
518 return 0;
519
520 DP_VERBOSE(p_hwfn,
521 QED_MSG_IOV,
522 "Enable internal access for vf %x [abs %x]\n",
523 vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
524
525 qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
526
527 rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
528 if (rc)
529 return rc;
530
531 qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
532
533 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
534 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
535
536 qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
537 p_hwfn->hw_info.hw_mode);
538
539 /* unpretend */
540 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
541
542 if (vf->state != VF_STOPPED) {
543 DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
544 vf->abs_vf_id);
545 return -EINVAL;
546 }
547
548 /* Start VF */
549 rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
550 if (rc)
551 DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
552
553 vf->state = VF_FREE;
554
555 return rc;
556}
557
558/**
559 * @brief qed_iov_config_perm_table - configure the permission
560 * zone table.
561 * In E4, queue zone permission table size is 320x9. There
562 * are 320 VF queues for single engine device (256 for dual
563 * engine device), and each entry has the following format:
564 * {Valid, VF[7:0]}
565 * @param p_hwfn
566 * @param p_ptt
567 * @param vf
568 * @param enable
569 */
570static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
571 struct qed_ptt *p_ptt,
572 struct qed_vf_info *vf, u8 enable)
573{
574 u32 reg_addr, val;
575 u16 qzone_id = 0;
576 int qid;
577
578 for (qid = 0; qid < vf->num_rxqs; qid++) {
579 qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
580 &qzone_id);
581
582 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
583 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
584 qed_wr(p_hwfn, p_ptt, reg_addr, val);
585 }
586}
587
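/* Claim free IGU status blocks for the VF, map them to the VF in the IGU
 * mapping memory and program the matching CAU entries; returns how many
 * SBs were actually assigned.
 */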
588static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
589 struct qed_ptt *p_ptt,
590 struct qed_vf_info *vf, u16 num_rx_queues)
591{
592 struct qed_igu_block *igu_blocks;
593 int qid = 0, igu_id = 0;
594 u32 val = 0;
595
596 igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
597
598 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
599 num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
600 p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
601
602 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
603 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
604 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
605
606 while ((qid < num_rx_queues) &&
607 (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
608 if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
609 struct cau_sb_entry sb_entry;
610
611 vf->igu_sbs[qid] = (u16)igu_id;
612 igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
613
614 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
615
616 qed_wr(p_hwfn, p_ptt,
617 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
618 val);
619
620 /* Configure igu sb in CAU which were marked valid */
621 qed_init_cau_sb_entry(p_hwfn, &sb_entry,
622 p_hwfn->rel_pf_id,
623 vf->abs_vf_id, 1);
624 qed_dmae_host2grc(p_hwfn, p_ptt,
625 (u64)(uintptr_t)&sb_entry,
626 CAU_REG_SB_VAR_MEMORY +
627 igu_id * sizeof(u64), 2, 0);
628 qid++;
629 }
630 igu_id++;
631 }
632
633 vf->num_sbs = (u8) num_rx_queues;
634
635 return vf->num_sbs;
636}
637
638static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
639 struct qed_ptt *p_ptt,
640 struct qed_vf_info *vf)
641{
642 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
643 int idx, igu_id;
644 u32 addr, val;
645
646 /* Invalidate igu CAM lines and mark them as free */
647 for (idx = 0; idx < vf->num_sbs; idx++) {
648 igu_id = vf->igu_sbs[idx];
649 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
650
651 val = qed_rd(p_hwfn, p_ptt, addr);
652 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
653 qed_wr(p_hwfn, p_ptt, addr, val);
654
655 p_info->igu_map.igu_blocks[igu_id].status |=
656 QED_IGU_STATUS_FREE;
657
658 p_hwfn->hw_info.p_igu_info->free_blks++;
659 }
660
661 vf->num_sbs = 0;
662}
663
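/* Bring up a single VF: cap its queue count by the available CIDs and IGU
 * SBs, assign its queue/CID ranges and enable its access to the device.
 */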
664static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
665 struct qed_ptt *p_ptt,
666 u16 rel_vf_id, u16 num_rx_queues)
667{
668 u8 num_of_vf_avaiable_chains = 0;
669 struct qed_vf_info *vf = NULL;
670 int rc = 0;
671 u32 cids;
672 u8 i;
673
674 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
675 if (!vf) {
676 DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
677 return -EINVAL;
678 }
679
680 if (vf->b_init) {
681 DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
682 return -EINVAL;
683 }
684
685 /* Limit number of queues according to number of CIDs */
686 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
687 DP_VERBOSE(p_hwfn,
688 QED_MSG_IOV,
689 "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
690 vf->relative_vf_id, num_rx_queues, (u16) cids);
691 num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
692
693 num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
694 p_ptt,
695 vf,
696 num_rx_queues);
697 if (!num_of_vf_avaiable_chains) {
698 DP_ERR(p_hwfn, "no available igu sbs\n");
699 return -ENOMEM;
700 }
701
702 /* Choose queue number and index ranges */
703 vf->num_rxqs = num_of_vf_avaiable_chains;
704 vf->num_txqs = num_of_vf_avaiable_chains;
705
706 for (i = 0; i < vf->num_rxqs; i++) {
707 u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
708 vf->igu_sbs[i]);
709
710 if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
711 DP_NOTICE(p_hwfn,
712 "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
713 vf->relative_vf_id, queue_id);
714 return -EINVAL;
715 }
716
717 /* CIDs are per-VF, so no problem having them 0-based. */
718 vf->vf_queues[i].fw_rx_qid = queue_id;
719 vf->vf_queues[i].fw_tx_qid = queue_id;
720 vf->vf_queues[i].fw_cid = i;
721
722 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
723 "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
724 vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
725 }
726 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
727 if (!rc) {
728 vf->b_init = true;
729
730 if (IS_LEAD_HWFN(p_hwfn))
731 p_hwfn->cdev->p_iov_info->num_vfs++;
732 }
733
734 return rc;
735}
736
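/* Tear down a single VF: send VF_STOP if it is still running, disable its
 * interrupts, clear its permission-table entries and free its IGU SBs.
 */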
737static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
738 struct qed_ptt *p_ptt, u16 rel_vf_id)
739{
740 struct qed_vf_info *vf = NULL;
741 int rc = 0;
742
743 vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
744 if (!vf) {
745 DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
746 return -EINVAL;
747 }
748
749 if (vf->state != VF_STOPPED) {
750 /* Stopping the VF */
751 rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
752
753 if (rc != 0) {
754 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
755 rc);
756 return rc;
757 }
758
759 vf->state = VF_STOPPED;
760 }
761
762 /* Disabling interrupts and resetting the permission table were done during
763 * vf-close; however, we could get here without going through vf-close.
764 */
765 /* Disable Interrupts for VF */
766 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
767
768 /* Reset Permission table */
769 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
770
771 vf->num_rxqs = 0;
772 vf->num_txqs = 0;
773 qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
774
775 if (vf->b_init) {
776 vf->b_init = false;
777
778 if (IS_LEAD_HWFN(p_hwfn))
779 p_hwfn->cdev->p_iov_info->num_vfs--;
780 }
781
782 return 0;
783}
784
785static bool qed_iov_tlv_supported(u16 tlvtype)
786{
787 return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
788}
789
790/* place a given tlv on the tlv buffer, continuing current tlv list */
791void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
792{
793 struct channel_tlv *tl = (struct channel_tlv *)*offset;
794
795 tl->type = type;
796 tl->length = length;
797
798 /* Offset should keep pointing to next TLV (the end of the last) */
799 *offset += length;
800
801 /* Return a pointer to the start of the added tlv */
802 return *offset - length;
803}
804
805/* list the types and lengths of the tlvs on the buffer */
806void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
807{
808 u16 i = 1, total_length = 0;
809 struct channel_tlv *tlv;
810
811 do {
812 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
813
814 /* output tlv */
815 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
816 "TLV number %d: type %d, length %d\n",
817 i, tlv->type, tlv->length);
818
819 if (tlv->type == CHANNEL_TLV_LIST_END)
820 return;
821
822 /* Validate entry - protect against malicious VFs */
823 if (!tlv->length) {
824 DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
825 return;
826 }
827
828 total_length += tlv->length;
829
830 if (total_length >= sizeof(struct tlv_buffer_size)) {
831 DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
832 return;
833 }
834
835 i++;
836 } while (1);
837}
838
839static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
840 struct qed_ptt *p_ptt,
841 struct qed_vf_info *p_vf,
842 u16 length, u8 status)
843{
844 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
845 struct qed_dmae_params params;
846 u8 eng_vf_id;
847
848 mbx->reply_virt->default_resp.hdr.status = status;
849
850 qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
851
852 eng_vf_id = p_vf->abs_vf_id;
853
854 memset(&params, 0, sizeof(struct qed_dmae_params));
855 params.flags = QED_DMAE_FLAG_VF_DST;
856 params.dst_vfid = eng_vf_id;
857
858 qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
859 mbx->req_virt->first_tlv.reply_address +
860 sizeof(u64),
861 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
862 &params);
863
864 qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
865 mbx->req_virt->first_tlv.reply_address,
866 sizeof(u64) / 4, &params);
867
868 REG_WR(p_hwfn,
869 GTT_BAR0_MAP_REG_USDM_RAM +
870 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
871}
872
873static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
874 struct qed_ptt *p_ptt,
875 struct qed_vf_info *vf_info,
876 u16 type, u16 length, u8 status)
877{
878 struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
879
880 mbx->offset = (u8 *)mbx->reply_virt;
881
882 qed_add_tlv(p_hwfn, &mbx->offset, type, length);
883 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
884 sizeof(struct channel_list_end_tlv));
885
886 qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
887}
888
889struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
890 u16 relative_vf_id,
891 bool b_enabled_only)
892{
893 struct qed_vf_info *vf = NULL;
894
895 vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
896 if (!vf)
897 return NULL;
898
899 return &vf->p_vf_info;
900}
901
902void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
903{
904 struct qed_public_vf_info *vf_info;
905
906 vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
907
908 if (!vf_info)
909 return;
910
911 /* Clear the VF mac */
912 memset(vf_info->mac, 0, ETH_ALEN);
913}
914
915static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
916 struct qed_vf_info *p_vf)
917{
918 u32 i;
919
920 p_vf->vf_bulletin = 0;
921 p_vf->num_mac_filters = 0;
922 p_vf->num_vlan_filters = 0;
923
924 /* If VF previously requested less resources, go back to default */
925 p_vf->num_rxqs = p_vf->num_sbs;
926 p_vf->num_txqs = p_vf->num_sbs;
927
928 for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
929 p_vf->vf_queues[i].rxq_active = 0;
930
931 qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
932}
933
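/* Handle the VF's ACQUIRE request: verify FW compatibility, record the
 * VF's mailbox/bulletin settings and reply with the PF/device info and
 * the resources granted to it.
 */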
934static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
935 struct qed_ptt *p_ptt,
936 struct qed_vf_info *vf)
937{
938 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
939 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
940 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
941 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
942 u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
943 struct pf_vf_resc *resc = &resp->resc;
944
945 /* Validate FW compatibility */
946 if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
947 req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
948 req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
949 req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
950 DP_INFO(p_hwfn,
951 "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
952 vf->abs_vf_id,
953 req->vfdev_info.fw_major,
954 req->vfdev_info.fw_minor,
955 req->vfdev_info.fw_revision,
956 req->vfdev_info.fw_engineering,
957 FW_MAJOR_VERSION,
958 FW_MINOR_VERSION,
959 FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
960 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
961 goto out;
962 }
963
964 /* On 100g PFs, prevent old VFs from loading */
965 if ((p_hwfn->cdev->num_hwfns > 1) &&
966 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
967 DP_INFO(p_hwfn,
968 "VF[%d] is running an old driver that doesn't support 100g\n",
969 vf->abs_vf_id);
970 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
971 goto out;
972 }
973
974 memset(resp, 0, sizeof(*resp));
975
976 /* Fill in vf info stuff */
977 vf->opaque_fid = req->vfdev_info.opaque_fid;
978 vf->num_mac_filters = 1;
979 vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
980
981 vf->vf_bulletin = req->bulletin_addr;
982 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
983 vf->bulletin.size : req->bulletin_size;
984
985 /* fill in pfdev info */
986 pfdev_info->chip_num = p_hwfn->cdev->chip_num;
987 pfdev_info->db_size = 0;
988 pfdev_info->indices_per_sb = PIS_PER_SB;
989
990 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
991 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
992 if (p_hwfn->cdev->num_hwfns > 1)
993 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
994
995 pfdev_info->stats_info.mstats.address =
996 PXP_VF_BAR0_START_MSDM_ZONE_B +
997 offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
998 pfdev_info->stats_info.mstats.len =
999 sizeof(struct eth_mstorm_per_queue_stat);
1000
1001 pfdev_info->stats_info.ustats.address =
1002 PXP_VF_BAR0_START_USDM_ZONE_B +
1003 offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
1004 pfdev_info->stats_info.ustats.len =
1005 sizeof(struct eth_ustorm_per_queue_stat);
1006
1007 pfdev_info->stats_info.pstats.address =
1008 PXP_VF_BAR0_START_PSDM_ZONE_B +
1009 offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
1010 pfdev_info->stats_info.pstats.len =
1011 sizeof(struct eth_pstorm_per_queue_stat);
1012
1013 pfdev_info->stats_info.tstats.address = 0;
1014 pfdev_info->stats_info.tstats.len = 0;
1015
1016 memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
1017
1018 pfdev_info->fw_major = FW_MAJOR_VERSION;
1019 pfdev_info->fw_minor = FW_MINOR_VERSION;
1020 pfdev_info->fw_rev = FW_REVISION_VERSION;
1021 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1022 pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
1023 qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
1024
1025 pfdev_info->dev_type = p_hwfn->cdev->type;
1026 pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
1027
1028 resc->num_rxqs = vf->num_rxqs;
1029 resc->num_txqs = vf->num_txqs;
1030 resc->num_sbs = vf->num_sbs;
1031 for (i = 0; i < resc->num_sbs; i++) {
1032 resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
1033 resc->hw_sbs[i].sb_qid = 0;
1034 }
1035
1036 for (i = 0; i < resc->num_rxqs; i++) {
1037 qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
1038 (u16 *)&resc->hw_qid[i]);
1039 resc->cid[i] = vf->vf_queues[i].fw_cid;
1040 }
1041
1042 resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
1043 req->resc_request.num_mac_filters);
1044 resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
1045 req->resc_request.num_vlan_filters);
1046
1047 /* This isn't really required as VF isn't limited, but some VFs might
1048 * actually test this value, so need to provide it.
1049 */
1050 resc->num_mc_filters = req->resc_request.num_mc_filters;
1051
1052 /* Fill agreed size of bulletin board in response */
1053 resp->bulletin_size = vf->bulletin.size;
1054
1055 DP_VERBOSE(p_hwfn,
1056 QED_MSG_IOV,
1057 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
1058 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
1059 vf->abs_vf_id,
1060 resp->pfdev_info.chip_num,
1061 resp->pfdev_info.db_size,
1062 resp->pfdev_info.indices_per_sb,
1063 resp->pfdev_info.capabilities,
1064 resc->num_rxqs,
1065 resc->num_txqs,
1066 resc->num_sbs,
1067 resc->num_mac_filters,
1068 resc->num_vlan_filters);
1069 vf->state = VF_ACQUIRED;
1070
1071 /* Prepare Response */
1072out:
1073 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1074 sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
1075}
1076
1077static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
1078 struct qed_ptt *p_ptt,
1079 struct qed_vf_info *vf)
1080{
1081 int i;
1082
1083 /* Reset the SBs */
1084 for (i = 0; i < vf->num_sbs; i++)
1085 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
1086 vf->igu_sbs[i],
1087 vf->opaque_fid, false);
1088
1089 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
1090 sizeof(struct pfvf_def_resp_tlv),
1091 PFVF_STATUS_SUCCESS);
1092}
1093
1094static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
1095 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
1096{
1097 u16 length = sizeof(struct pfvf_def_resp_tlv);
1098 u8 status = PFVF_STATUS_SUCCESS;
1099
1100 /* Disable Interrupts for VF */
1101 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1102
1103 /* Reset Permission table */
1104 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1105
1106 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
1107 length, status);
1108}
1109
1110static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
1111 struct qed_ptt *p_ptt,
1112 struct qed_vf_info *p_vf)
1113{
1114 u16 length = sizeof(struct pfvf_def_resp_tlv);
1115
1116 qed_iov_vf_cleanup(p_hwfn, p_vf);
1117
1118 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
1119 length, PFVF_STATUS_SUCCESS);
1120}
1121
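/* While pretending to the VF's FID, poll until the DORQ usage counter
 * drops to zero, i.e. the VF has no doorbells left in flight.
 */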
1122static int
1123qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
1124 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
1125{
1126 int cnt;
1127 u32 val;
1128
1129 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
1130
1131 for (cnt = 0; cnt < 50; cnt++) {
1132 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
1133 if (!val)
1134 break;
1135 msleep(20);
1136 }
1137 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
1138
1139 if (cnt == 50) {
1140 DP_ERR(p_hwfn,
1141 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
1142 p_vf->abs_vf_id, val);
1143 return -EBUSY;
1144 }
1145
1146 return 0;
1147}
1148
1149static int
1150qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
1151 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
1152{
1153 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
1154 int i, cnt;
1155
1156 /* Read initial consumers & producers */
1157 for (i = 0; i < MAX_NUM_VOQS; i++) {
1158 u32 prod;
1159
1160 cons[i] = qed_rd(p_hwfn, p_ptt,
1161 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
1162 i * 0x40);
1163 prod = qed_rd(p_hwfn, p_ptt,
1164 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
1165 i * 0x40);
1166 distance[i] = prod - cons[i];
1167 }
1168
1169 /* Wait for consumers to pass the producers */
1170 i = 0;
1171 for (cnt = 0; cnt < 50; cnt++) {
1172 for (; i < MAX_NUM_VOQS; i++) {
1173 u32 tmp;
1174
1175 tmp = qed_rd(p_hwfn, p_ptt,
1176 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
1177 i * 0x40);
1178 if (distance[i] > tmp - cons[i])
1179 break;
1180 }
1181
1182 if (i == MAX_NUM_VOQS)
1183 break;
1184
1185 msleep(20);
1186 }
1187
1188 if (cnt == 50) {
1189 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
1190 p_vf->abs_vf_id, i);
1191 return -EBUSY;
1192 }
1193
1194 return 0;
1195}
1196
1197static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
1198 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
1199{
1200 int rc;
1201
1202 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
1203 if (rc)
1204 return rc;
1205
1206 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
1207 if (rc)
1208 return rc;
1209
1210 return 0;
1211}
1212
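/* If an FLR is pending for this VF: clean its SW state, wait for HW to
 * drain, run the firmware final cleanup, re-enable the VF and mark it for
 * an FLR ack towards the MFW.
 */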
1213static int
1214qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
1215 struct qed_ptt *p_ptt,
1216 u16 rel_vf_id, u32 *ack_vfs)
1217{
1218 struct qed_vf_info *p_vf;
1219 int rc = 0;
1220
1221 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
1222 if (!p_vf)
1223 return 0;
1224
1225 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
1226 (1ULL << (rel_vf_id % 64))) {
1227 u16 vfid = p_vf->abs_vf_id;
1228
1229 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1230 "VF[%d] - Handling FLR\n", vfid);
1231
1232 qed_iov_vf_cleanup(p_hwfn, p_vf);
1233
1234 /* If VF isn't active, no need for anything but SW */
1235 if (!p_vf->b_init)
1236 goto cleanup;
1237
1238 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
1239 if (rc)
1240 goto cleanup;
1241
1242 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
1243 if (rc) {
1244 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
1245 return rc;
1246 }
1247
1248 /* VF_STOPPED has to be set only after final cleanup
1249 * but prior to re-enabling the VF.
1250 */
1251 p_vf->state = VF_STOPPED;
1252
1253 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
1254 if (rc) {
1255 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
1256 vfid);
1257 return rc;
1258 }
1259cleanup:
1260 /* Mark VF for ack and clean pending state */
1261 if (p_vf->state == VF_RESET)
1262 p_vf->state = VF_STOPPED;
1263 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
1264 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
1265 ~(1ULL << (rel_vf_id % 64));
1266 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
1267 ~(1ULL << (rel_vf_id % 64));
1268 }
1269
1270 return rc;
1271}
1272
1273int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1274{
1275 u32 ack_vfs[VF_MAX_STATIC / 32];
1276 int rc = 0;
1277 u16 i;
1278
1279 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
1280
1281 /* Since BRB <-> PRS interface can't be tested as part of the flr
1282 * polling due to HW limitations, simply sleep a bit. And since
1283 * there's no need to wait per-vf, do it before looping.
1284 */
1285 msleep(100);
1286
1287 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
1288 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
1289
1290 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
1291 return rc;
1292}
1293
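/* Translate the MFW's disabled-VF bitmap into pending-FLR bits for the
 * VFs owned by this PF and move them to VF_RESET; returns non-zero if any
 * VF was marked.
 */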
1294int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
1295{
1296 u16 i, found = 0;
1297
1298 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
1299 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1300 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1301 "[%08x,...,%08x]: %08x\n",
1302 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
1303
1304 if (!p_hwfn->cdev->p_iov_info) {
1305 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
1306 return 0;
1307 }
1308
1309 /* Mark VFs */
1310 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
1311 struct qed_vf_info *p_vf;
1312 u8 vfid;
1313
1314 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
1315 if (!p_vf)
1316 continue;
1317
1318 vfid = p_vf->abs_vf_id;
1319 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
1320 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
1321 u16 rel_vf_id = p_vf->relative_vf_id;
1322
1323 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1324 "VF[%d] [rel %d] got FLR-ed\n",
1325 vfid, rel_vf_id);
1326
1327 p_vf->state = VF_RESET;
1328
1329 /* No need to lock here, since pending_flr should
1330 * only change here and before ACKing MFw. Since
1331 * MFW will not trigger an additional attention for
1332 * VF flr until ACKs, we're safe.
1333 */
1334 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
1335 found = 1;
1336 }
1337 }
1338
1339 return found;
1340}
1341
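/* Dispatch a VF mailbox request (already copied into the PF-side request
 * buffer) according to the type of its first TLV.
 */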
1342static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
1343 struct qed_ptt *p_ptt, int vfid)
1344{
1345 struct qed_iov_vf_mbx *mbx;
1346 struct qed_vf_info *p_vf;
1347 int i;
1348
1349 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
1350 if (!p_vf)
1351 return;
1352
1353 mbx = &p_vf->vf_mbx;
1354
1355 /* qed_iov_process_mbx_request */
1356 DP_VERBOSE(p_hwfn,
1357 QED_MSG_IOV,
1358 "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
1359
1360 mbx->first_tlv = mbx->req_virt->first_tlv;
1361
1362 /* check if tlv type is known */
1363 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
1364 switch (mbx->first_tlv.tl.type) {
1365 case CHANNEL_TLV_ACQUIRE:
1366 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
1367 break;
1368 case CHANNEL_TLV_CLOSE:
1369 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
1370 break;
1371 case CHANNEL_TLV_INT_CLEANUP:
1372 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
1373 break;
1374 case CHANNEL_TLV_RELEASE:
1375 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
1376 break;
1377 }
1378 } else {
1379 /* unknown TLV - this may belong to a VF driver from the future
1380 * - a version written after this PF driver was written, which
1381 * supports features unknown as of yet. Too bad since we don't
1382 * support them. Or this may be because someone wrote a crappy
1383 * VF driver and is sending garbage over the channel.
1384 */
1385 DP_ERR(p_hwfn,
1386 "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
1387 mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
1388
1389 for (i = 0; i < 20; i++) {
1390 DP_VERBOSE(p_hwfn,
1391 QED_MSG_IOV,
1392 "%x ",
1393 mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
1394 }
1395 }
1396}
1397
1398void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
1399{
1400 u64 add_bit = 1ULL << (vfid % 64);
1401
1402 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
1403}
1404
1405static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
1406 u64 *events)
1407{
1408 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
1409
1410 memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
1411 memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
1412}
1413
1414static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
1415 u16 abs_vfid, struct regpair *vf_msg)
1416{
1417 u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
1418 struct qed_vf_info *p_vf;
1419
1420 if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
1421 DP_VERBOSE(p_hwfn,
1422 QED_MSG_IOV,
1423 "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
1424 abs_vfid);
1425 return 0;
1426 }
1427 p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
1428
1429 /* List the physical address of the request so that handler
1430 * could later on copy the message from it.
1431 */
1432 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
1433
1434 /* Mark the event and schedule the workqueue */
1435 qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
1436 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
1437
1438 return 0;
1439}
1440
1441int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
1442 u8 opcode, __le16 echo, union event_ring_data *data)
1443{
1444 switch (opcode) {
1445 case COMMON_EVENT_VF_PF_CHANNEL:
1446 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
1447 &data->vf_pf_channel.msg_addr);
1448 default:
1449 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
1450 opcode);
1451 return -EINVAL;
1452 }
1453}
1454
1455u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
1456{
1457 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1458 u16 i;
1459
1460 if (!p_iov)
1461 goto out;
1462
1463 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
1464 if (qed_iov_is_valid_vfid(p_hwfn, i, true))
1465 return i;
1466
1467out:
1468 return MAX_NUM_VFS;
1469}
1470
1471static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
1472 int vfid)
1473{
1474 struct qed_dmae_params params;
1475 struct qed_vf_info *vf_info;
1476
1477 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
1478 if (!vf_info)
1479 return -EINVAL;
1480
1481 memset(&params, 0, sizeof(struct qed_dmae_params));
1482 params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
1483 params.src_vfid = vf_info->abs_vf_id;
1484
1485 if (qed_dmae_host2host(p_hwfn, ptt,
1486 vf_info->vf_mbx.pending_req,
1487 vf_info->vf_mbx.req_phys,
1488 sizeof(union vfpf_tlvs) / 4, &params)) {
1489 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1490 "Failed to copy message from VF 0x%02x\n", vfid);
1491
1492 return -EIO;
1493 }
1494
1495 return 0;
1496}
1497
1498bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
1499{
1500 struct qed_vf_info *p_vf_info;
1501
1502 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
1503 if (!p_vf_info)
1504 return true;
1505
1506 return p_vf_info->state == VF_STOPPED;
1507}
1508
1509/**
1510 * qed_schedule_iov - schedules IOV task for VF and PF
1511 * @hwfn: hardware function pointer
1512 * @flag: IOV flag for VF/PF
1513 */
1514void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
1515{
1516 smp_mb__before_atomic();
1517 set_bit(flag, &hwfn->iov_task_flags);
1518 smp_mb__after_atomic();
1519 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
1520 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
1521}
1522
1523void qed_vf_start_iov_wq(struct qed_dev *cdev)
1524{
1525 int i;
1526
1527 for_each_hwfn(cdev, i)
1528 queue_delayed_work(cdev->hwfns[i].iov_wq,
1529 &cdev->hwfns[i].iov_task, 0);
1530}
1531
1532int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
1533{
1534 int i, j;
1535
1536 for_each_hwfn(cdev, i)
1537 if (cdev->hwfns[i].iov_wq)
1538 flush_workqueue(cdev->hwfns[i].iov_wq);
1539
1540 /* Mark VFs for disablement */
1541 qed_iov_set_vfs_to_disable(cdev, true);
1542
1543 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
1544 pci_disable_sriov(cdev->pdev);
1545
1546 for_each_hwfn(cdev, i) {
1547 struct qed_hwfn *hwfn = &cdev->hwfns[i];
1548 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
1549
1550 /* Failure to acquire the ptt in 100g creates an odd error
1551 * where the first engine has already released IOV.
1552 */
1553 if (!ptt) {
1554 DP_ERR(hwfn, "Failed to acquire ptt\n");
1555 return -EBUSY;
1556 }
1557
1558 qed_for_each_vf(hwfn, j) {
1559 int k;
1560
1561 if (!qed_iov_is_valid_vfid(hwfn, j, true))
1562 continue;
1563
1564 /* Wait until VF is disabled before releasing */
1565 for (k = 0; k < 100; k++) {
1566 if (!qed_iov_is_vf_stopped(hwfn, j))
1567 msleep(20);
1568 else
1569 break;
1570 }
1571
1572 if (k < 100)
1573 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
1574 ptt, j);
1575 else
1576 DP_ERR(hwfn,
1577 "Timeout waiting for VF's FLR to end\n");
1578 }
1579
1580 qed_ptt_release(hwfn, ptt);
1581 }
1582
1583 qed_iov_set_vfs_to_disable(cdev, false);
1584
1585 return 0;
1586}
1587
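/* Enable SR-IOV: initialize HW for each requested VF on every hwfn,
 * splitting the available status blocks between them, and only then
 * enable the VFs at the PCI level.
 */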
1588static int qed_sriov_enable(struct qed_dev *cdev, int num)
1589{
1590 struct qed_sb_cnt_info sb_cnt_info;
1591 int i, j, rc;
1592
1593 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
1594 DP_NOTICE(cdev, "Can start at most %d VFs\n",
1595 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
1596 return -EINVAL;
1597 }
1598
1599 /* Initialize HW for VF access */
1600 for_each_hwfn(cdev, j) {
1601 struct qed_hwfn *hwfn = &cdev->hwfns[j];
1602 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
1603 int num_sbs = 0, limit = 16;
1604
1605 if (!ptt) {
1606 DP_ERR(hwfn, "Failed to acquire ptt\n");
1607 rc = -EBUSY;
1608 goto err;
1609 }
1610
1611 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
1612 qed_int_get_num_sbs(hwfn, &sb_cnt_info);
1613 num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
1614
1615 for (i = 0; i < num; i++) {
1616 if (!qed_iov_is_valid_vfid(hwfn, i, false))
1617 continue;
1618
1619 rc = qed_iov_init_hw_for_vf(hwfn,
1620 ptt, i, num_sbs / num);
1621 if (rc) {
1622 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
1623 qed_ptt_release(hwfn, ptt);
1624 goto err;
1625 }
1626 }
1627
1628 qed_ptt_release(hwfn, ptt);
1629 }
1630
1631 /* Enable SRIOV PCIe functions */
1632 rc = pci_enable_sriov(cdev->pdev, num);
1633 if (rc) {
1634 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
1635 goto err;
1636 }
1637
1638 return num;
1639
1640err:
1641 qed_sriov_disable(cdev, false);
1642 return rc;
1643}
1644
1645static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
1646{
1647 if (!IS_QED_SRIOV(cdev)) {
1648 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
1649 return -EOPNOTSUPP;
1650 }
1651
1652 if (num_vfs_param)
1653 return qed_sriov_enable(cdev, num_vfs_param);
1654 else
1655 return qed_sriov_disable(cdev, true);
1656}
1657
1658static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
1659{
1660 u64 events[QED_VF_ARRAY_LENGTH];
1661 struct qed_ptt *ptt;
1662 int i;
1663
1664 ptt = qed_ptt_acquire(hwfn);
1665 if (!ptt) {
1666 DP_VERBOSE(hwfn, QED_MSG_IOV,
1667 "Can't acquire PTT; re-scheduling\n");
1668 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
1669 return;
1670 }
1671
1672 qed_iov_pf_get_and_clear_pending_events(hwfn, events);
1673
1674 DP_VERBOSE(hwfn, QED_MSG_IOV,
1675 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
1676 events[0], events[1], events[2]);
1677
1678 qed_for_each_vf(hwfn, i) {
1679 /* Skip VFs with no pending messages */
1680 if (!(events[i / 64] & (1ULL << (i % 64))))
1681 continue;
1682
1683 DP_VERBOSE(hwfn, QED_MSG_IOV,
1684 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
1685 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1686
1687 /* Copy VF's message to PF's request buffer for that VF */
1688 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
1689 continue;
1690
1691 qed_iov_process_mbx_req(hwfn, ptt, i);
1692 }
1693
1694 qed_ptt_release(hwfn, ptt);
1695}
1696
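/* Deferred IOV work item: handles pending VF FLRs and VF->PF mailbox
 * messages outside interrupt context.
 */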
1697void qed_iov_pf_task(struct work_struct *work)
1698{
1699 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
1700 iov_task.work);
1701 int rc;
1702
1703 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
1704 return;
1705
1706 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
1707 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
1708
1709 if (!ptt) {
1710 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
1711 return;
1712 }
1713
1714 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
1715 if (rc)
1716 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
1717
1718 qed_ptt_release(hwfn, ptt);
1719 }
1720
1721 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
1722 qed_handle_vf_msg(hwfn);
1723}
1724
1725void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
1726{
1727 int i;
1728
1729 for_each_hwfn(cdev, i) {
1730 if (!cdev->hwfns[i].iov_wq)
1731 continue;
1732
1733 if (schedule_first) {
1734 qed_schedule_iov(&cdev->hwfns[i],
1735 QED_IOV_WQ_STOP_WQ_FLAG);
1736 cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
1737 }
1738
1739 flush_workqueue(cdev->hwfns[i].iov_wq);
1740 destroy_workqueue(cdev->hwfns[i].iov_wq);
1741 }
1742}
1743
1744int qed_iov_wq_start(struct qed_dev *cdev)
1745{
1746 char name[NAME_SIZE];
1747 int i;
1748
1749 for_each_hwfn(cdev, i) {
1750 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1751
1752 /* PFs need a dedicated workqueue only if they support IOV. */
1753 if (!IS_PF_SRIOV(p_hwfn))
1754 continue;
1755
1756 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
1757 cdev->pdev->bus->number,
1758 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
1759
1760 p_hwfn->iov_wq = create_singlethread_workqueue(name);
1761 if (!p_hwfn->iov_wq) {
1762 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
1763 return -ENOMEM;
1764 }
1765
1766 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
1767 }
1768
1769 return 0;
1770}
1771
1772const struct qed_iov_hv_ops qed_iov_ops_pass = {
1773 .configure = &qed_sriov_configure,
1774};