drivers/net/ethernet/qlogic/qed/qed_sriov.c - commit "qed: Introduce VFs" (mirror_ubuntu-eoan-kernel.git)

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

/* IOV ramrods */
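/* Send a VF_START ramrod to firmware for the given VF. The request is posted
 * on the slowpath queue with EBLOCK completion mode, so the call returns only
 * once firmware has processed it; the personality is fixed to ETH.
 */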
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
			   u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);

	p_ramrod->personality = PERSONALITY_ETH;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

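/* Returns true only if @rel_vf_id is within the PF's total_vfs range and,
 * when @b_enabled_only is set, only if that VF has also been initialized.
 */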
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id, bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

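/* Cache the PF's SR-IOV capability from PCI configuration space (control,
 * total/initial VFs, VF offset/stride, VF device ID, page size, etc.) into
 * cdev->p_iov_info, and sanity-check the reported VF counts.
 */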
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

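/* Walk the IGU mapping memory and clear the "valid" bit of every status block
 * that is free and not owned by the PF, so no stale VF mappings are left
 * behind.
 */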
static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}

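/* Initialize the per-VF database: point each VF entry at its slice of the
 * pre-allocated mailbox request/reply buffers and bulletin board, and derive
 * its relative/absolute/concrete/opaque IDs and default vport.
 */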
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;
	}
}

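/* Allocate the DMA-coherent buffers shared with the VFs: one mailbox request
 * and one mailbox reply slot per VF, plus a bulletin board per VF. Returns
 * -ENOMEM if any allocation fails.
 */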
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
		return -ENOMEM;
	}
	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * in case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* Calculate the first VF index - this is a bit tricky; basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
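	/* For example (illustrative numbers only): with a VF offset of 16 and
	 * abs_pf_id 0, the first VF index is 16 + 0 - 16 = 0; a PF on the
	 * second engine additionally subtracts MAX_NUM_VFS_BB below.
	 */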
	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
					   p_hwfn->abs_pf_id - 16;
	if (QED_PATH_ID(p_hwfn))
		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	return true;
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

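/* Enable hardware access for a VF: clear any pending PGLUE error, configure
 * its MSI-X table via the management firmware, run the per-VF init phase while
 * pretending to be that VF, and finally post the VF_START ramrod. Moves the
 * VF from VF_STOPPED to VF_FREE.
 */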
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (vf->state != VF_STOPPED) {
		DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
			  vf->abs_vf_id);
		return -EINVAL;
	}

	/* Start VF */
	rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);

	vf->state = VF_FREE;

	return rc;
}

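/* Assign free IGU status blocks to the VF, one per requested Rx queue (capped
 * by the number of free blocks), program the IGU mapping lines and the CAU
 * entries for each, and return how many were actually allocated.
 */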
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

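/* Bring up hardware resources for a single VF: cap the requested queue count
 * by the available ETH CIDs, allocate IGU status blocks, map the VF's Rx/Tx
 * queues and CIDs, and enable VF access. On success the VF is marked
 * initialized and the PF's active-VF count is bumped.
 */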
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
		return -EINVAL;
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16) cids);
	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf,
							     num_rx_queues);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
							   vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
			DP_NOTICE(p_hwfn,
				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			return -EINVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}
	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

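/* Copy the prepared TLV reply from the PF's per-VF reply buffer into the
 * VF-supplied reply address via DMAE (body first, header last so the VF does
 * not see a partially written reply), then set the VF-PF channel ready bit.
 */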
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

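/* Handle the VF's ACQUIRE request: verify firmware-version and 100G
 * compatibility, record the VF's opaque FID and bulletin address, and build
 * the acquire response (PF/device info, statistics zone addresses and the
 * agreed resource counts) before sending it back over the mailbox.
 */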
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
	struct pf_vf_resc *resc = &resp->resc;

	/* Validate FW compatibility */
	if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
	    req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
	    req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
	    req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
			vf->abs_vf_id,
			req->vfdev_info.fw_major,
			req->vfdev_info.fw_minor,
			req->vfdev_info.fw_revision,
			req->vfdev_info.fw_engineering,
			FW_MAJOR_VERSION,
			FW_MINOR_VERSION,
			FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	memset(resp, 0, sizeof(*resp));

	/* Fill in vf info stuff */
	vf->opaque_fid = req->vfdev_info.opaque_fid;
	vf->num_mac_filters = 1;
	vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	pfdev_info->stats_info.mstats.address =
	    PXP_VF_BAR0_START_MSDM_ZONE_B +
	    offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.mstats.len =
	    sizeof(struct eth_mstorm_per_queue_stat);

	pfdev_info->stats_info.ustats.address =
	    PXP_VF_BAR0_START_USDM_ZONE_B +
	    offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.ustats.len =
	    sizeof(struct eth_ustorm_per_queue_stat);

	pfdev_info->stats_info.pstats.address =
	    PXP_VF_BAR0_START_PSDM_ZONE_B +
	    offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.pstats.len =
	    sizeof(struct eth_pstorm_per_queue_stat);

	pfdev_info->stats_info.tstats.address = 0;
	pfdev_info->stats_info.tstats.len = 0;

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	resc->num_rxqs = vf->num_rxqs;
	resc->num_txqs = vf->num_txqs;
	resc->num_sbs = vf->num_sbs;
	for (i = 0; i < resc->num_sbs; i++) {
		resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
		resc->hw_sbs[i].sb_qid = 0;
	}

	for (i = 0; i < resc->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
				(u16 *)&resc->hw_qid[i]);
		resc->cid[i] = vf->vf_queues[i].fw_cid;
	}

	resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
				      req->resc_request.num_mac_filters);
	resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
				       req->resc_request.num_vlan_filters);

	/* This isn't really required as VF isn't limited, but some VFs might
	 * actually test this value, so need to provide it.
	 */
	resc->num_mc_filters = req->resc_request.num_mc_filters;

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;
	int i;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_ERR(p_hwfn,
		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

		for (i = 0; i < 20; i++) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "%x ",
				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
		}
	}
}

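/* Mark a VF as having a pending message by setting its bit in the PF's
 * 64-bit-per-word pending-events bitmap.
 */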
void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}

static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
						    u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
}

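/* EQ handler for a VF->PF channel message: validate that the PF can service
 * the VF, remember the physical address of the VF's request, mark the event
 * pending and kick the IOV workqueue to process it.
 */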
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	struct qed_vf_info *p_vf;

	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return 0;
	}
	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	/* Mark the event and schedule the workqueue */
	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}

int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
			return i;

out:
	return MAX_NUM_VFS;
}

static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return -EIO;
	}

	return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);
}

static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_and_clear_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

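/* Delayed-work handler for the per-hwfn IOV workqueue; bails out if a stop was
 * requested, otherwise drains pending VF mailbox messages.
 */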
void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);
}

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

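/* Create a dedicated single-threaded IOV workqueue per SR-IOV capable PF,
 * named after the PCI bus/slot and PF id, and hook up its delayed work item.
 */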
int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV. */
		if (!IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
	}

	return 0;
}