/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

23 | /* IOV ramrods */ |
24 | static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, | |
25 | u32 concrete_vfid, u16 opaque_vfid) | |
26 | { | |
27 | struct vf_start_ramrod_data *p_ramrod = NULL; | |
28 | struct qed_spq_entry *p_ent = NULL; | |
29 | struct qed_sp_init_data init_data; | |
30 | int rc = -EINVAL; | |
31 | ||
32 | /* Get SPQ entry */ | |
33 | memset(&init_data, 0, sizeof(init_data)); | |
34 | init_data.cid = qed_spq_get_cid(p_hwfn); | |
35 | init_data.opaque_fid = opaque_vfid; | |
36 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
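	/* EBLOCK completion mode: the qed_spq_post() call below is expected to
	 * block until the ramrod completes (an assumption based on the mode
	 * naming; callback mode would instead complete asynchronously).
	 */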
37 | ||
38 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
39 | COMMON_RAMROD_VF_START, | |
40 | PROTOCOLID_COMMON, &init_data); | |
41 | if (rc) | |
42 | return rc; | |
43 | ||
44 | p_ramrod = &p_ent->ramrod.vf_start; | |
45 | ||
46 | p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); | |
47 | p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid); | |
48 | ||
49 | p_ramrod->personality = PERSONALITY_ETH; | |
50 | ||
51 | return qed_spq_post(p_hwfn, p_ent, NULL); | |
52 | } | |
53 | ||
54 | static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, |
55 | u32 concrete_vfid, u16 opaque_vfid) | |
56 | { | |
57 | struct vf_stop_ramrod_data *p_ramrod = NULL; | |
58 | struct qed_spq_entry *p_ent = NULL; | |
59 | struct qed_sp_init_data init_data; | |
60 | int rc = -EINVAL; | |
61 | ||
62 | /* Get SPQ entry */ | |
63 | memset(&init_data, 0, sizeof(init_data)); | |
64 | init_data.cid = qed_spq_get_cid(p_hwfn); | |
65 | init_data.opaque_fid = opaque_vfid; | |
66 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | |
67 | ||
68 | rc = qed_sp_init_request(p_hwfn, &p_ent, | |
69 | COMMON_RAMROD_VF_STOP, | |
70 | PROTOCOLID_COMMON, &init_data); | |
71 | if (rc) | |
72 | return rc; | |
73 | ||
74 | p_ramrod = &p_ent->ramrod.vf_stop; | |
75 | ||
76 | p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); | |
77 | ||
78 | return qed_spq_post(p_hwfn, p_ent, NULL); | |
79 | } | |
80 | ||
81 | bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, |
82 | int rel_vf_id, bool b_enabled_only) | |
83 | { | |
84 | if (!p_hwfn->pf_iov_info) { | |
85 | DP_NOTICE(p_hwfn->cdev, "No iov info\n"); | |
86 | return false; | |
87 | } | |
88 | ||
89 | if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || | |
90 | (rel_vf_id < 0)) | |
91 | return false; | |
92 | ||
93 | if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && | |
94 | b_enabled_only) | |
95 | return false; | |
96 | ||
97 | return true; | |
98 | } | |
99 | ||
100 | static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, |
101 | u16 relative_vf_id, | |
102 | bool b_enabled_only) | |
103 | { | |
104 | struct qed_vf_info *vf = NULL; | |
105 | ||
106 | if (!p_hwfn->pf_iov_info) { | |
107 | DP_NOTICE(p_hwfn->cdev, "No iov info\n"); | |
108 | return NULL; | |
109 | } | |
110 | ||
111 | if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only)) | |
112 | vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; | |
113 | else | |
114 | DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", | |
115 | relative_vf_id); | |
116 | ||
117 | return vf; | |
118 | } | |
119 | ||
120 | int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, |
121 | int vfid, struct qed_ptt *p_ptt) | |
122 | { | |
123 | struct qed_bulletin_content *p_bulletin; | |
124 | int crc_size = sizeof(p_bulletin->crc); | |
125 | struct qed_dmae_params params; | |
126 | struct qed_vf_info *p_vf; | |
127 | ||
128 | p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
129 | if (!p_vf) | |
130 | return -EINVAL; | |
131 | ||
132 | if (!p_vf->vf_bulletin) | |
133 | return -EINVAL; | |
134 | ||
135 | p_bulletin = p_vf->bulletin.p_virt; | |
136 | ||
137 | /* Increment bulletin board version and compute crc */ | |
138 | p_bulletin->version++; | |
139 | p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, | |
140 | p_vf->bulletin.size - crc_size); | |
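	/* Note that the CRC deliberately skips the leading crc field itself,
	 * so the VF side can recompute it over the same range and detect a
	 * partially updated bulletin.
	 */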
141 | ||
142 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
143 | "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", | |
144 | p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); | |
145 | ||
146 | /* propagate bulletin board via dmae to vm memory */ | |
147 | memset(¶ms, 0, sizeof(params)); | |
148 | params.flags = QED_DMAE_FLAG_VF_DST; | |
149 | params.dst_vfid = p_vf->abs_vf_id; | |
150 | return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, | |
151 | p_vf->vf_bulletin, p_vf->bulletin.size / 4, | |
152 | ¶ms); | |
153 | } | |
154 | ||
155 | static int qed_iov_pci_cfg_info(struct qed_dev *cdev) |
156 | { | |
157 | struct qed_hw_sriov_info *iov = cdev->p_iov_info; | |
158 | int pos = iov->pos; | |
159 | ||
160 | DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); | |
161 | pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); | |
162 | ||
163 | pci_read_config_word(cdev->pdev, | |
164 | pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); | |
165 | pci_read_config_word(cdev->pdev, | |
166 | pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); | |
167 | ||
168 | pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); | |
169 | if (iov->num_vfs) { | |
170 | DP_VERBOSE(cdev, | |
171 | QED_MSG_IOV, | |
172 | "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n"); | |
173 | iov->num_vfs = 0; | |
174 | } | |
175 | ||
176 | pci_read_config_word(cdev->pdev, | |
177 | pos + PCI_SRIOV_VF_OFFSET, &iov->offset); | |
178 | ||
179 | pci_read_config_word(cdev->pdev, | |
180 | pos + PCI_SRIOV_VF_STRIDE, &iov->stride); | |
181 | ||
182 | pci_read_config_word(cdev->pdev, | |
183 | pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); | |
184 | ||
185 | pci_read_config_dword(cdev->pdev, | |
186 | pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); | |
187 | ||
188 | pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); | |
189 | ||
190 | pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); | |
191 | ||
192 | DP_VERBOSE(cdev, | |
193 | QED_MSG_IOV, | |
194 | "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", | |
195 | iov->nres, | |
196 | iov->cap, | |
197 | iov->ctrl, | |
198 | iov->total_vfs, | |
199 | iov->initial_vfs, | |
200 | iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); | |
201 | ||
202 | /* Some sanity checks */ | |
203 | if (iov->num_vfs > NUM_OF_VFS(cdev) || | |
204 | iov->total_vfs > NUM_OF_VFS(cdev)) { | |
205 | /* This can happen only due to a bug. In this case we set | |
206 | * num_vfs to zero to avoid memory corruption in the code that | |
207 | * assumes max number of vfs | |
208 | */ | |
209 | DP_NOTICE(cdev, | |
210 | "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", | |
211 | iov->num_vfs); | |
212 | ||
213 | iov->num_vfs = 0; | |
214 | iov->total_vfs = 0; | |
215 | } | |
216 | ||
217 | return 0; | |
218 | } | |
219 | ||
220 | static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn, | |
221 | struct qed_ptt *p_ptt) | |
222 | { | |
223 | struct qed_igu_block *p_sb; | |
224 | u16 sb_id; | |
225 | u32 val; | |
226 | ||
227 | if (!p_hwfn->hw_info.p_igu_info) { | |
228 | DP_ERR(p_hwfn, | |
229 | "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n"); | |
230 | return; | |
231 | } | |
232 | ||
233 | for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); | |
234 | sb_id++) { | |
235 | p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; | |
236 | if ((p_sb->status & QED_IGU_STATUS_FREE) && | |
237 | !(p_sb->status & QED_IGU_STATUS_PF)) { | |
238 | val = qed_rd(p_hwfn, p_ptt, | |
239 | IGU_REG_MAPPING_MEMORY + sb_id * 4); | |
240 | SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); | |
241 | qed_wr(p_hwfn, p_ptt, | |
242 | IGU_REG_MAPPING_MEMORY + 4 * sb_id, val); | |
243 | } | |
244 | } | |
245 | } | |
246 | ||
247 | static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) | |
248 | { | |
249 | struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; | |
250 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; | |
251 | struct qed_bulletin_content *p_bulletin_virt; | |
252 | dma_addr_t req_p, rply_p, bulletin_p; | |
253 | union pfvf_tlvs *p_reply_virt_addr; | |
254 | union vfpf_tlvs *p_req_virt_addr; | |
255 | u8 idx = 0; | |
256 | ||
257 | memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); | |
258 | ||
259 | p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; | |
260 | req_p = p_iov_info->mbx_msg_phys_addr; | |
261 | p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; | |
262 | rply_p = p_iov_info->mbx_reply_phys_addr; | |
263 | p_bulletin_virt = p_iov_info->p_bulletins; | |
264 | bulletin_p = p_iov_info->bulletins_phys; | |
265 | if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { | |
266 | DP_ERR(p_hwfn, | |
267 | "qed_iov_setup_vfdb called without allocating mem first\n"); | |
268 | return; | |
269 | } | |
270 | ||
271 | for (idx = 0; idx < p_iov->total_vfs; idx++) { | |
272 | struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; | |
273 | u32 concrete; | |
274 | ||
275 | vf->vf_mbx.req_virt = p_req_virt_addr + idx; | |
276 | vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); | |
277 | vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; | |
278 | vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); | |
279 | ||
280 | vf->state = VF_STOPPED; | |
281 | vf->b_init = false; | |
282 | ||
283 | vf->bulletin.phys = idx * | |
284 | sizeof(struct qed_bulletin_content) + | |
285 | bulletin_p; | |
286 | vf->bulletin.p_virt = p_bulletin_virt + idx; | |
287 | vf->bulletin.size = sizeof(struct qed_bulletin_content); | |
288 | ||
289 | vf->relative_vf_id = idx; | |
290 | vf->abs_vf_id = idx + p_iov->first_vf_in_pf; | |
291 | concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); | |
292 | vf->concrete_fid = concrete; | |
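		/* Opaque fid layout, as composed below: the PF's opaque fid in
		 * the low byte and the VF's absolute id in the high byte.
		 */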
293 | vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | | |
294 | (vf->abs_vf_id << 8); | |
295 | vf->vport_id = idx + 1; | |
296 | } | |
297 | } | |
298 | ||
299 | static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) | |
300 | { | |
301 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; | |
302 | void **p_v_addr; | |
303 | u16 num_vfs = 0; | |
304 | ||
305 | num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; | |
306 | ||
307 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
308 | "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); | |
309 | ||
310 | /* Allocate PF Mailbox buffer (per-VF) */ | |
311 | p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; | |
312 | p_v_addr = &p_iov_info->mbx_msg_virt_addr; | |
313 | *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
314 | p_iov_info->mbx_msg_size, | |
315 | &p_iov_info->mbx_msg_phys_addr, | |
316 | GFP_KERNEL); | |
317 | if (!*p_v_addr) | |
318 | return -ENOMEM; | |
319 | ||
320 | /* Allocate PF Mailbox Reply buffer (per-VF) */ | |
321 | p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; | |
322 | p_v_addr = &p_iov_info->mbx_reply_virt_addr; | |
323 | *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
324 | p_iov_info->mbx_reply_size, | |
325 | &p_iov_info->mbx_reply_phys_addr, | |
326 | GFP_KERNEL); | |
327 | if (!*p_v_addr) | |
328 | return -ENOMEM; | |
329 | ||
330 | p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * | |
331 | num_vfs; | |
332 | p_v_addr = &p_iov_info->p_bulletins; | |
333 | *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, | |
334 | p_iov_info->bulletins_size, | |
335 | &p_iov_info->bulletins_phys, | |
336 | GFP_KERNEL); | |
337 | if (!*p_v_addr) | |
338 | return -ENOMEM; | |
339 | ||
340 | DP_VERBOSE(p_hwfn, | |
341 | QED_MSG_IOV, | |
342 | "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", | |
343 | p_iov_info->mbx_msg_virt_addr, | |
344 | (u64) p_iov_info->mbx_msg_phys_addr, | |
345 | p_iov_info->mbx_reply_virt_addr, | |
346 | (u64) p_iov_info->mbx_reply_phys_addr, | |
347 | p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); | |
348 | ||
349 | return 0; | |
350 | } | |
351 | ||
352 | static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) | |
353 | { | |
354 | struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; | |
355 | ||
356 | if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) | |
357 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
358 | p_iov_info->mbx_msg_size, | |
359 | p_iov_info->mbx_msg_virt_addr, | |
360 | p_iov_info->mbx_msg_phys_addr); | |
361 | ||
362 | if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) | |
363 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
364 | p_iov_info->mbx_reply_size, | |
365 | p_iov_info->mbx_reply_virt_addr, | |
366 | p_iov_info->mbx_reply_phys_addr); | |
367 | ||
368 | if (p_iov_info->p_bulletins) | |
369 | dma_free_coherent(&p_hwfn->cdev->pdev->dev, | |
370 | p_iov_info->bulletins_size, | |
371 | p_iov_info->p_bulletins, | |
372 | p_iov_info->bulletins_phys); | |
373 | } | |
374 | ||
375 | int qed_iov_alloc(struct qed_hwfn *p_hwfn) | |
376 | { | |
377 | struct qed_pf_iov *p_sriov; | |
378 | ||
379 | if (!IS_PF_SRIOV(p_hwfn)) { | |
380 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
381 | "No SR-IOV - no need for IOV db\n"); | |
382 | return 0; | |
383 | } | |
384 | ||
385 | p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); | |
386 | if (!p_sriov) { | |
387 | DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n"); | |
388 | return -ENOMEM; | |
389 | } | |
390 | ||
391 | p_hwfn->pf_iov_info = p_sriov; | |
392 | ||
393 | return qed_iov_allocate_vfdb(p_hwfn); | |
394 | } | |
395 | ||
396 | void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |
397 | { | |
398 | if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) | |
399 | return; | |
400 | ||
401 | qed_iov_setup_vfdb(p_hwfn); | |
402 | qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt); | |
403 | } | |
404 | ||
405 | void qed_iov_free(struct qed_hwfn *p_hwfn) | |
406 | { | |
407 | if (IS_PF_SRIOV_ALLOC(p_hwfn)) { | |
408 | qed_iov_free_vfdb(p_hwfn); | |
409 | kfree(p_hwfn->pf_iov_info); | |
410 | } | |
411 | } | |
412 | ||
413 | void qed_iov_free_hw_info(struct qed_dev *cdev) | |
414 | { | |
415 | kfree(cdev->p_iov_info); | |
416 | cdev->p_iov_info = NULL; | |
417 | } | |
418 | ||
419 | int qed_iov_hw_info(struct qed_hwfn *p_hwfn) | |
420 | { | |
421 | struct qed_dev *cdev = p_hwfn->cdev; | |
422 | int pos; | |
423 | int rc; | |
424 | ||
425 | if (IS_VF(p_hwfn->cdev)) |
426 | return 0; | |
427 | ||
428 | /* Learn the PCI configuration */ |
429 | pos = pci_find_ext_capability(p_hwfn->cdev->pdev, | |
430 | PCI_EXT_CAP_ID_SRIOV); | |
431 | if (!pos) { | |
432 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); | |
433 | return 0; | |
434 | } | |
435 | ||
436 | /* Allocate a new struct for IOV information */ | |
437 | cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); | |
438 | if (!cdev->p_iov_info) { | |
439 | DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n"); | |
440 | return -ENOMEM; | |
441 | } | |
442 | cdev->p_iov_info->pos = pos; | |
443 | ||
444 | rc = qed_iov_pci_cfg_info(cdev); | |
445 | if (rc) | |
446 | return rc; | |
447 | ||
	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * in case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
452 | if (!cdev->p_iov_info->total_vfs) { | |
453 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
454 | "IOV capabilities, but no VFs are published\n"); | |
455 | kfree(cdev->p_iov_info); | |
456 | cdev->p_iov_info = NULL; | |
457 | return 0; | |
458 | } | |
459 | ||
	/* Calculate the first VF index - this is a bit tricky; basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
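	/* Illustrative example (assuming the common offset of 16 on the first
	 * engine): PF0 with abs_pf_id 0 gets first_vf_in_pf = 16 + 0 - 16 = 0.
	 */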
464 | cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset + | |
465 | p_hwfn->abs_pf_id - 16; | |
466 | if (QED_PATH_ID(p_hwfn)) | |
467 | cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; | |
468 | ||
469 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
470 | "First VF in hwfn 0x%08x\n", | |
471 | cdev->p_iov_info->first_vf_in_pf); | |
472 | ||
473 | return 0; | |
474 | } | |
475 | ||
476 | static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) |
477 | { | |
478 | /* Check PF supports sriov */ | |
479 | if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn)) | |
480 | return false; | |
481 | ||
482 | /* Check VF validity */ | |
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
485 | return false; |
486 | ||
487 | return true; | |
488 | } | |
489 | ||
490 | static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, |
491 | u16 rel_vf_id, u8 to_disable) | |
492 | { | |
493 | struct qed_vf_info *vf; | |
494 | int i; | |
495 | ||
496 | for_each_hwfn(cdev, i) { | |
497 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | |
498 | ||
499 | vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); | |
500 | if (!vf) | |
501 | continue; | |
502 | ||
503 | vf->to_disable = to_disable; | |
504 | } | |
505 | } | |
506 | ||
507 | void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) | |
508 | { | |
509 | u16 i; | |
510 | ||
511 | if (!IS_QED_SRIOV(cdev)) | |
512 | return; | |
513 | ||
514 | for (i = 0; i < cdev->p_iov_info->total_vfs; i++) | |
515 | qed_iov_set_vf_to_disable(cdev, i, to_disable); | |
516 | } | |
517 | ||
518 | static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, |
519 | struct qed_ptt *p_ptt, u8 abs_vfid) | |
520 | { | |
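	/* Each WAS_ERROR register covers 32 VFs; select the dword holding this
	 * VF's bit (abs_vfid / 32) and clear that sticky bit (abs_vfid % 32).
	 */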
521 | qed_wr(p_hwfn, p_ptt, | |
522 | PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, | |
523 | 1 << (abs_vfid & 0x1f)); | |
524 | } | |
525 | ||
526 | static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, |
527 | struct qed_ptt *p_ptt, struct qed_vf_info *vf) | |
528 | { | |
529 | u16 igu_sb_id; | |
530 | int i; | |
531 | ||
532 | /* Set VF masks and configuration - pretend */ | |
533 | qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); | |
534 | ||
535 | qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); | |
536 | ||
537 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
538 | "value in VF_CONFIGURATION of vf %d after write %x\n", | |
539 | vf->abs_vf_id, | |
540 | qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION)); | |
541 | ||
542 | /* unpretend */ | |
543 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); | |
544 | ||
545 | /* iterate over all queues, clear sb consumer */ | |
546 | for (i = 0; i < vf->num_sbs; i++) { | |
547 | igu_sb_id = vf->igu_sbs[i]; | |
548 | /* Set then clear... */ | |
549 | qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, | |
550 | vf->opaque_fid); | |
551 | qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, | |
552 | vf->opaque_fid); | |
553 | } | |
554 | } | |
555 | ||
556 | static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, |
557 | struct qed_ptt *p_ptt, | |
558 | struct qed_vf_info *vf, bool enable) | |
559 | { | |
560 | u32 igu_vf_conf; | |
561 | ||
562 | qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); | |
563 | ||
564 | igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); | |
565 | ||
566 | if (enable) | |
567 | igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; | |
568 | else | |
569 | igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; | |
570 | ||
571 | qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); | |
572 | ||
573 | /* unpretend */ | |
574 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); | |
575 | } | |
576 | ||
577 | static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, |
578 | struct qed_ptt *p_ptt, | |
579 | struct qed_vf_info *vf) | |
580 | { | |
581 | u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; | |
582 | int rc; | |
583 | ||
584 | if (vf->to_disable) |
585 | return 0; | |
586 | ||
587 | DP_VERBOSE(p_hwfn, |
588 | QED_MSG_IOV, | |
589 | "Enable internal access for vf %x [abs %x]\n", | |
590 | vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); | |
591 | ||
592 | qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); | |
593 | ||
594 | rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); | |
595 | if (rc) | |
596 | return rc; | |
597 | ||
598 | qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); | |
599 | ||
600 | SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); | |
601 | STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); | |
602 | ||
603 | qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, | |
604 | p_hwfn->hw_info.hw_mode); | |
605 | ||
606 | /* unpretend */ | |
607 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); | |
608 | ||
609 | if (vf->state != VF_STOPPED) { | |
610 | DP_NOTICE(p_hwfn, "VF[%02x] is already started\n", | |
611 | vf->abs_vf_id); | |
612 | return -EINVAL; | |
613 | } | |
614 | ||
615 | /* Start VF */ | |
616 | rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid); | |
617 | if (rc) | |
618 | DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); | |
619 | ||
620 | vf->state = VF_FREE; | |
621 | ||
622 | return rc; | |
623 | } | |
624 | ||
625 | /** |
626 | * @brief qed_iov_config_perm_table - configure the permission | |
627 | * zone table. | |
628 | * In E4, queue zone permission table size is 320x9. There | |
629 | * are 320 VF queues for single engine device (256 for dual | |
630 | * engine device), and each entry has the following format: | |
631 | * {Valid, VF[7:0]} | |
632 | * @param p_hwfn | |
633 | * @param p_ptt | |
634 | * @param vf | |
635 | * @param enable | |
636 | */ | |
637 | static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, | |
638 | struct qed_ptt *p_ptt, | |
639 | struct qed_vf_info *vf, u8 enable) | |
640 | { | |
641 | u32 reg_addr, val; | |
642 | u16 qzone_id = 0; | |
643 | int qid; | |
644 | ||
645 | for (qid = 0; qid < vf->num_rxqs; qid++) { | |
646 | qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, | |
647 | &qzone_id); | |
648 | ||
649 | reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; | |
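		/* Entry format is {Valid, VF[7:0]}: bit 8 is the Valid bit and
		 * bits 7:0 carry the absolute VF id; writing 0 revokes access.
		 */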
650 | val = enable ? (vf->abs_vf_id | (1 << 8)) : 0; | |
651 | qed_wr(p_hwfn, p_ptt, reg_addr, val); | |
652 | } | |
653 | } | |
654 | ||
655 | static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, |
656 | struct qed_ptt *p_ptt, | |
657 | struct qed_vf_info *vf) | |
658 | { | |
659 | /* Reset vf in IGU - interrupts are still disabled */ | |
660 | qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); | |
661 | ||
662 | qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); | |
663 | ||
664 | /* Permission Table */ | |
665 | qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); | |
666 | } | |
667 | ||
668 | static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, |
669 | struct qed_ptt *p_ptt, | |
670 | struct qed_vf_info *vf, u16 num_rx_queues) | |
671 | { | |
672 | struct qed_igu_block *igu_blocks; | |
673 | int qid = 0, igu_id = 0; | |
674 | u32 val = 0; | |
675 | ||
676 | igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; | |
677 | ||
678 | if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) | |
679 | num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; | |
680 | p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; | |
681 | ||
682 | SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); | |
683 | SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); | |
684 | SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); | |
685 | ||
686 | while ((qid < num_rx_queues) && | |
687 | (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) { | |
688 | if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) { | |
689 | struct cau_sb_entry sb_entry; | |
690 | ||
691 | vf->igu_sbs[qid] = (u16)igu_id; | |
692 | igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE; | |
693 | ||
694 | SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); | |
695 | ||
696 | qed_wr(p_hwfn, p_ptt, | |
697 | IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, | |
698 | val); | |
699 | ||
			/* Configure the IGU SB in CAU which was marked valid */
701 | qed_init_cau_sb_entry(p_hwfn, &sb_entry, | |
702 | p_hwfn->rel_pf_id, | |
703 | vf->abs_vf_id, 1); | |
704 | qed_dmae_host2grc(p_hwfn, p_ptt, | |
705 | (u64)(uintptr_t)&sb_entry, | |
706 | CAU_REG_SB_VAR_MEMORY + | |
707 | igu_id * sizeof(u64), 2, 0); | |
708 | qid++; | |
709 | } | |
710 | igu_id++; | |
711 | } | |
712 | ||
713 | vf->num_sbs = (u8) num_rx_queues; | |
714 | ||
715 | return vf->num_sbs; | |
716 | } | |
717 | ||
718 | static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, |
719 | struct qed_ptt *p_ptt, | |
720 | struct qed_vf_info *vf) | |
721 | { | |
722 | struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; | |
723 | int idx, igu_id; | |
724 | u32 addr, val; | |
725 | ||
726 | /* Invalidate igu CAM lines and mark them as free */ | |
727 | for (idx = 0; idx < vf->num_sbs; idx++) { | |
728 | igu_id = vf->igu_sbs[idx]; | |
729 | addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; | |
730 | ||
731 | val = qed_rd(p_hwfn, p_ptt, addr); | |
732 | SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); | |
733 | qed_wr(p_hwfn, p_ptt, addr, val); | |
734 | ||
735 | p_info->igu_map.igu_blocks[igu_id].status |= | |
736 | QED_IGU_STATUS_FREE; | |
737 | ||
738 | p_hwfn->hw_info.p_igu_info->free_blks++; | |
739 | } | |
740 | ||
741 | vf->num_sbs = 0; | |
742 | } | |
743 | ||
744 | static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, |
745 | struct qed_ptt *p_ptt, | |
746 | u16 rel_vf_id, u16 num_rx_queues) | |
747 | { | |
748 | u8 num_of_vf_avaiable_chains = 0; | |
749 | struct qed_vf_info *vf = NULL; | |
750 | int rc = 0; | |
751 | u32 cids; | |
752 | u8 i; | |
753 | ||
754 | vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); | |
755 | if (!vf) { | |
756 | DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); | |
757 | return -EINVAL; | |
758 | } | |
759 | ||
760 | if (vf->b_init) { | |
761 | DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id); | |
762 | return -EINVAL; | |
763 | } | |
764 | ||
765 | /* Limit number of queues according to number of CIDs */ | |
766 | qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); | |
767 | DP_VERBOSE(p_hwfn, | |
768 | QED_MSG_IOV, | |
769 | "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", | |
770 | vf->relative_vf_id, num_rx_queues, (u16) cids); | |
771 | num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids)); | |
772 | ||
773 | num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, | |
774 | p_ptt, | |
775 | vf, | |
776 | num_rx_queues); | |
777 | if (!num_of_vf_avaiable_chains) { | |
778 | DP_ERR(p_hwfn, "no available igu sbs\n"); | |
779 | return -ENOMEM; | |
780 | } | |
781 | ||
782 | /* Choose queue number and index ranges */ | |
783 | vf->num_rxqs = num_of_vf_avaiable_chains; | |
784 | vf->num_txqs = num_of_vf_avaiable_chains; | |
785 | ||
786 | for (i = 0; i < vf->num_rxqs; i++) { | |
787 | u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn, | |
788 | vf->igu_sbs[i]); | |
789 | ||
790 | if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) { | |
791 | DP_NOTICE(p_hwfn, | |
792 | "VF[%d] will require utilizing of out-of-bounds queues - %04x\n", | |
793 | vf->relative_vf_id, queue_id); | |
794 | return -EINVAL; | |
795 | } | |
796 | ||
797 | /* CIDs are per-VF, so no problem having them 0-based. */ | |
798 | vf->vf_queues[i].fw_rx_qid = queue_id; | |
799 | vf->vf_queues[i].fw_tx_qid = queue_id; | |
800 | vf->vf_queues[i].fw_cid = i; | |
801 | ||
802 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
803 | "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n", | |
804 | vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i); | |
805 | } | |
806 | rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); | |
807 | if (!rc) { | |
808 | vf->b_init = true; | |
809 | ||
810 | if (IS_LEAD_HWFN(p_hwfn)) | |
811 | p_hwfn->cdev->p_iov_info->num_vfs++; | |
812 | } | |
813 | ||
814 | return rc; | |
815 | } | |
816 | ||
817 | static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, |
818 | struct qed_ptt *p_ptt, u16 rel_vf_id) | |
819 | { | |
820 | struct qed_vf_info *vf = NULL; | |
821 | int rc = 0; | |
822 | ||
823 | vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); | |
824 | if (!vf) { | |
825 | DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); | |
826 | return -EINVAL; | |
827 | } | |
828 | ||
829 | if (vf->bulletin.p_virt) |
830 | memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); | |
831 | ||
832 | memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); | |
833 | ||
834 | if (vf->state != VF_STOPPED) { |
835 | /* Stopping the VF */ | |
836 | rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid); | |
837 | ||
838 | if (rc != 0) { | |
839 | DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", | |
840 | rc); | |
841 | return rc; | |
842 | } | |
843 | ||
844 | vf->state = VF_STOPPED; | |
845 | } | |
846 | ||
	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
850 | /* Disable Interrupts for VF */ | |
851 | qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); | |
852 | ||
853 | /* Reset Permission table */ | |
854 | qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); | |
855 | ||
856 | vf->num_rxqs = 0; | |
857 | vf->num_txqs = 0; | |
858 | qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); | |
859 | ||
860 | if (vf->b_init) { | |
861 | vf->b_init = false; | |
862 | ||
863 | if (IS_LEAD_HWFN(p_hwfn)) | |
864 | p_hwfn->cdev->p_iov_info->num_vfs--; | |
865 | } | |
866 | ||
867 | return 0; | |
868 | } | |
869 | ||
870 | static bool qed_iov_tlv_supported(u16 tlvtype) |
871 | { | |
872 | return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; | |
873 | } | |
874 | ||
875 | /* place a given tlv on the tlv buffer, continuing current tlv list */ | |
876 | void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) | |
877 | { | |
878 | struct channel_tlv *tl = (struct channel_tlv *)*offset; | |
879 | ||
880 | tl->type = type; | |
881 | tl->length = length; | |
882 | ||
883 | /* Offset should keep pointing to next TLV (the end of the last) */ | |
884 | *offset += length; | |
885 | ||
886 | /* Return a pointer to the start of the added tlv */ | |
887 | return *offset - length; | |
888 | } | |
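
/* A reply buffer is typically built by chaining qed_add_tlv() calls - a rough
 * sketch (illustrative only, mirroring what qed_iov_prepare_resp() below does):
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */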
889 | ||
890 | /* list the types and lengths of the tlvs on the buffer */ | |
891 | void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) | |
892 | { | |
893 | u16 i = 1, total_length = 0; | |
894 | struct channel_tlv *tlv; | |
895 | ||
896 | do { | |
897 | tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); | |
898 | ||
899 | /* output tlv */ | |
900 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
901 | "TLV number %d: type %d, length %d\n", | |
902 | i, tlv->type, tlv->length); | |
903 | ||
904 | if (tlv->type == CHANNEL_TLV_LIST_END) | |
905 | return; | |
906 | ||
907 | /* Validate entry - protect against malicious VFs */ | |
908 | if (!tlv->length) { | |
909 | DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); | |
910 | return; | |
911 | } | |
912 | ||
913 | total_length += tlv->length; | |
914 | ||
915 | if (total_length >= sizeof(struct tlv_buffer_size)) { | |
916 | DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n"); | |
917 | return; | |
918 | } | |
919 | ||
920 | i++; | |
921 | } while (1); | |
922 | } | |
923 | ||
924 | static void qed_iov_send_response(struct qed_hwfn *p_hwfn, | |
925 | struct qed_ptt *p_ptt, | |
926 | struct qed_vf_info *p_vf, | |
927 | u16 length, u8 status) | |
928 | { | |
929 | struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; | |
930 | struct qed_dmae_params params; | |
931 | u8 eng_vf_id; | |
932 | ||
933 | mbx->reply_virt->default_resp.hdr.status = status; | |
934 | ||
935 | qed_dp_tlv_list(p_hwfn, mbx->reply_virt); | |
936 | ||
937 | eng_vf_id = p_vf->abs_vf_id; | |
938 | ||
939 | memset(¶ms, 0, sizeof(struct qed_dmae_params)); | |
940 | params.flags = QED_DMAE_FLAG_VF_DST; | |
941 | params.dst_vfid = eng_vf_id; | |
942 | ||
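	/* Copy the reply body first and the leading u64 (the TLV header that
	 * carries the status) last - presumably so the VF cannot observe a
	 * completed status before the rest of the reply has landed.
	 */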
943 | qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), | |
944 | mbx->req_virt->first_tlv.reply_address + | |
945 | sizeof(u64), | |
946 | (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, | |
947 | ¶ms); | |
948 | ||
949 | qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, | |
950 | mbx->req_virt->first_tlv.reply_address, | |
951 | sizeof(u64) / 4, ¶ms); | |
952 | ||
953 | REG_WR(p_hwfn, | |
954 | GTT_BAR0_MAP_REG_USDM_RAM + | |
955 | USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); | |
956 | } | |
957 | ||
958 | static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, |
959 | enum qed_iov_vport_update_flag flag) | |
960 | { | |
961 | switch (flag) { | |
962 | case QED_IOV_VP_UPDATE_ACTIVATE: | |
963 | return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; | |
964 | case QED_IOV_VP_UPDATE_VLAN_STRIP: |
965 | return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; | |
966 | case QED_IOV_VP_UPDATE_TX_SWITCH: | |
967 | return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; | |
968 | case QED_IOV_VP_UPDATE_MCAST: |
969 | return CHANNEL_TLV_VPORT_UPDATE_MCAST; | |
970 | case QED_IOV_VP_UPDATE_ACCEPT_PARAM: | |
971 | return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; | |
972 | case QED_IOV_VP_UPDATE_RSS: | |
973 | return CHANNEL_TLV_VPORT_UPDATE_RSS; | |
974 | case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: |
975 | return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; | |
976 | case QED_IOV_VP_UPDATE_SGE_TPA: | |
977 | return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; | |
978 | default: |
979 | return 0; | |
980 | } | |
981 | } | |
982 | ||
983 | static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, | |
984 | struct qed_vf_info *p_vf, | |
985 | struct qed_iov_vf_mbx *p_mbx, | |
986 | u8 status, | |
987 | u16 tlvs_mask, u16 tlvs_accepted) | |
988 | { | |
989 | struct pfvf_def_resp_tlv *resp; | |
990 | u16 size, total_len, i; | |
991 | ||
992 | memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); | |
993 | p_mbx->offset = (u8 *)p_mbx->reply_virt; | |
994 | size = sizeof(struct pfvf_def_resp_tlv); | |
995 | total_len = size; | |
996 | ||
997 | qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); | |
998 | ||
999 | /* Prepare response for all extended tlvs if they are found by PF */ | |
1000 | for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { | |
1001 | if (!(tlvs_mask & (1 << i))) | |
1002 | continue; | |
1003 | ||
1004 | resp = qed_add_tlv(p_hwfn, &p_mbx->offset, | |
1005 | qed_iov_vport_to_tlv(p_hwfn, i), size); | |
1006 | ||
1007 | if (tlvs_accepted & (1 << i)) | |
1008 | resp->hdr.status = status; | |
1009 | else | |
1010 | resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; | |
1011 | ||
1012 | DP_VERBOSE(p_hwfn, | |
1013 | QED_MSG_IOV, | |
1014 | "VF[%d] - vport_update response: TLV %d, status %02x\n", | |
1015 | p_vf->relative_vf_id, | |
1016 | qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); | |
1017 | ||
1018 | total_len += size; | |
1019 | } | |
1020 | ||
1021 | qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, | |
1022 | sizeof(struct channel_list_end_tlv)); | |
1023 | ||
1024 | return total_len; | |
1025 | } | |
1026 | ||
1027 | static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, |
1028 | struct qed_ptt *p_ptt, | |
1029 | struct qed_vf_info *vf_info, | |
1030 | u16 type, u16 length, u8 status) | |
1031 | { | |
1032 | struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; | |
1033 | ||
1034 | mbx->offset = (u8 *)mbx->reply_virt; | |
1035 | ||
1036 | qed_add_tlv(p_hwfn, &mbx->offset, type, length); | |
1037 | qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, | |
1038 | sizeof(struct channel_list_end_tlv)); | |
1039 | ||
1040 | qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); | |
1041 | } | |
1042 | ||
1043 | struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, |
1044 | u16 relative_vf_id, | |
1045 | bool b_enabled_only) | |
1046 | { | |
1047 | struct qed_vf_info *vf = NULL; | |
1048 | ||
1049 | vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); | |
1050 | if (!vf) | |
1051 | return NULL; | |
1052 | ||
1053 | return &vf->p_vf_info; | |
1054 | } | |
1055 | ||
1056 | void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) | |
1057 | { | |
1058 | struct qed_public_vf_info *vf_info; | |
1059 | ||
1060 | vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); | |
1061 | ||
1062 | if (!vf_info) | |
1063 | return; | |
1064 | ||
1065 | /* Clear the VF mac */ | |
1066 | memset(vf_info->mac, 0, ETH_ALEN); | |
1067 | } | |
1068 | ||
1069 | static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, | |
1070 | struct qed_vf_info *p_vf) | |
1071 | { | |
1072 | u32 i; | |
1073 | ||
1074 | p_vf->vf_bulletin = 0; | |
dacd88d6 | 1075 | p_vf->vport_instance = 0; |
1076 | p_vf->num_mac_filters = 0; |
1077 | p_vf->num_vlan_filters = 0; | |
1078 | ||
1079 | /* If VF previously requested less resources, go back to default */ | |
1080 | p_vf->num_rxqs = p_vf->num_sbs; | |
1081 | p_vf->num_txqs = p_vf->num_sbs; | |
1082 | ||
1083 | p_vf->num_active_rxqs = 0; |
1084 | ||
1085 | for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) |
1086 | p_vf->vf_queues[i].rxq_active = 0; | |
1087 | ||
1088 | qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); | |
1089 | } | |
1090 | ||
1091 | static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, |
1092 | struct qed_ptt *p_ptt, | |
1093 | struct qed_vf_info *vf) | |
37bff2b9 | 1094 | { |
1095 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; |
1096 | struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; | |
1097 | struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; | |
1098 | struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; | |
1099 | u8 i, vfpf_status = PFVF_STATUS_SUCCESS; | |
1100 | struct pf_vf_resc *resc = &resp->resc; | |
1101 | ||
1102 | /* Validate FW compatibility */ | |
1103 | if (req->vfdev_info.fw_major != FW_MAJOR_VERSION || | |
1104 | req->vfdev_info.fw_minor != FW_MINOR_VERSION || | |
1105 | req->vfdev_info.fw_revision != FW_REVISION_VERSION || | |
1106 | req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) { | |
1107 | DP_INFO(p_hwfn, | |
1108 | "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n", | |
1109 | vf->abs_vf_id, | |
1110 | req->vfdev_info.fw_major, | |
1111 | req->vfdev_info.fw_minor, | |
1112 | req->vfdev_info.fw_revision, | |
1113 | req->vfdev_info.fw_engineering, | |
1114 | FW_MAJOR_VERSION, | |
1115 | FW_MINOR_VERSION, | |
1116 | FW_REVISION_VERSION, FW_ENGINEERING_VERSION); | |
1117 | vfpf_status = PFVF_STATUS_NOT_SUPPORTED; | |
1118 | goto out; | |
1119 | } | |
1120 | ||
1121 | /* On 100g PFs, prevent old VFs from loading */ | |
1122 | if ((p_hwfn->cdev->num_hwfns > 1) && | |
1123 | !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { | |
1124 | DP_INFO(p_hwfn, | |
1125 | "VF[%d] is running an old driver that doesn't support 100g\n", | |
1126 | vf->abs_vf_id); | |
1127 | vfpf_status = PFVF_STATUS_NOT_SUPPORTED; | |
1128 | goto out; | |
1129 | } | |
1130 | ||
1131 | memset(resp, 0, sizeof(*resp)); | |
1132 | ||
1133 | /* Fill in vf info stuff */ | |
1134 | vf->opaque_fid = req->vfdev_info.opaque_fid; | |
1135 | vf->num_mac_filters = 1; | |
1136 | vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; | |
1137 | ||
1138 | vf->vf_bulletin = req->bulletin_addr; | |
1139 | vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? | |
1140 | vf->bulletin.size : req->bulletin_size; | |
1141 | ||
1142 | /* fill in pfdev info */ | |
1143 | pfdev_info->chip_num = p_hwfn->cdev->chip_num; | |
1144 | pfdev_info->db_size = 0; | |
1145 | pfdev_info->indices_per_sb = PIS_PER_SB; | |
1146 | ||
1147 | pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | | |
1148 | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; | |
1149 | if (p_hwfn->cdev->num_hwfns > 1) | |
1150 | pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; | |
1151 | ||
1152 | pfdev_info->stats_info.mstats.address = | |
1153 | PXP_VF_BAR0_START_MSDM_ZONE_B + | |
1154 | offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat); | |
1155 | pfdev_info->stats_info.mstats.len = | |
1156 | sizeof(struct eth_mstorm_per_queue_stat); | |
1157 | ||
1158 | pfdev_info->stats_info.ustats.address = | |
1159 | PXP_VF_BAR0_START_USDM_ZONE_B + | |
1160 | offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat); | |
1161 | pfdev_info->stats_info.ustats.len = | |
1162 | sizeof(struct eth_ustorm_per_queue_stat); | |
1163 | ||
1164 | pfdev_info->stats_info.pstats.address = | |
1165 | PXP_VF_BAR0_START_PSDM_ZONE_B + | |
1166 | offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat); | |
1167 | pfdev_info->stats_info.pstats.len = | |
1168 | sizeof(struct eth_pstorm_per_queue_stat); | |
1169 | ||
1170 | pfdev_info->stats_info.tstats.address = 0; | |
1171 | pfdev_info->stats_info.tstats.len = 0; | |
1172 | ||
1173 | memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); | |
1174 | ||
1175 | pfdev_info->fw_major = FW_MAJOR_VERSION; | |
1176 | pfdev_info->fw_minor = FW_MINOR_VERSION; | |
1177 | pfdev_info->fw_rev = FW_REVISION_VERSION; | |
1178 | pfdev_info->fw_eng = FW_ENGINEERING_VERSION; | |
1179 | pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; | |
1180 | qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); | |
1181 | ||
1182 | pfdev_info->dev_type = p_hwfn->cdev->type; | |
1183 | pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; | |
1184 | ||
1185 | resc->num_rxqs = vf->num_rxqs; | |
1186 | resc->num_txqs = vf->num_txqs; | |
1187 | resc->num_sbs = vf->num_sbs; | |
1188 | for (i = 0; i < resc->num_sbs; i++) { | |
1189 | resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i]; | |
1190 | resc->hw_sbs[i].sb_qid = 0; | |
1191 | } | |
1192 | ||
1193 | for (i = 0; i < resc->num_rxqs; i++) { | |
1194 | qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid, | |
1195 | (u16 *)&resc->hw_qid[i]); | |
1196 | resc->cid[i] = vf->vf_queues[i].fw_cid; | |
1197 | } | |
1198 | ||
1199 | resc->num_mac_filters = min_t(u8, vf->num_mac_filters, | |
1200 | req->resc_request.num_mac_filters); | |
1201 | resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters, | |
1202 | req->resc_request.num_vlan_filters); | |
1203 | ||
	/* This isn't really required as the VF isn't limited, but some VFs
	 * might actually test this value, so we need to provide it.
	 */
1207 | resc->num_mc_filters = req->resc_request.num_mc_filters; | |
1208 | ||
1209 | /* Fill agreed size of bulletin board in response */ | |
1210 | resp->bulletin_size = vf->bulletin.size; | |
36558c3d | 1211 | qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); |
1212 | |
1213 | DP_VERBOSE(p_hwfn, | |
1214 | QED_MSG_IOV, | |
1215 | "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" | |
1216 | "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", | |
1217 | vf->abs_vf_id, | |
1218 | resp->pfdev_info.chip_num, | |
1219 | resp->pfdev_info.db_size, | |
1220 | resp->pfdev_info.indices_per_sb, | |
1221 | resp->pfdev_info.capabilities, | |
1222 | resc->num_rxqs, | |
1223 | resc->num_txqs, | |
1224 | resc->num_sbs, | |
1225 | resc->num_mac_filters, | |
1226 | resc->num_vlan_filters); | |
1227 | vf->state = VF_ACQUIRED; | |
1228 | ||
1229 | /* Prepare Response */ | |
1230 | out: | |
1231 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, | |
1232 | sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); | |
1233 | } |
1234 | ||
1235 | static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, |
1236 | struct qed_ptt *p_ptt, | |
1237 | struct qed_vf_info *vf) | |
1238 | { | |
1239 | struct qed_sp_vport_start_params params = { 0 }; | |
1240 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1241 | struct vfpf_vport_start_tlv *start; | |
1242 | u8 status = PFVF_STATUS_SUCCESS; | |
1243 | struct qed_vf_info *vf_info; | |
1244 | int sb_id; | |
1245 | int rc; | |
1246 | ||
1247 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); | |
1248 | if (!vf_info) { | |
1249 | DP_NOTICE(p_hwfn->cdev, | |
1250 | "Failed to get VF info, invalid vfid [%d]\n", | |
1251 | vf->relative_vf_id); | |
1252 | return; | |
1253 | } | |
1254 | ||
1255 | vf->state = VF_ENABLED; | |
1256 | start = &mbx->req_virt->start_vport; | |
1257 | ||
1258 | /* Initialize Status block in CAU */ | |
1259 | for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { | |
1260 | if (!start->sb_addr[sb_id]) { | |
1261 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
1262 | "VF[%d] did not fill the address of SB %d\n", | |
1263 | vf->relative_vf_id, sb_id); | |
1264 | break; | |
1265 | } | |
1266 | ||
1267 | qed_int_cau_conf_sb(p_hwfn, p_ptt, | |
1268 | start->sb_addr[sb_id], | |
1269 | vf->igu_sbs[sb_id], | |
1270 | vf->abs_vf_id, 1); | |
1271 | } | |
1272 | qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); | |
1273 | ||
1274 | vf->mtu = start->mtu; | |
1275 | ||
1276 | params.tpa_mode = start->tpa_mode; | |
1277 | params.remove_inner_vlan = start->inner_vlan_removal; | |
1278 | ||
1279 | params.drop_ttl0 = false; | |
1280 | params.concrete_fid = vf->concrete_fid; | |
1281 | params.opaque_fid = vf->opaque_fid; | |
1282 | params.vport_id = vf->vport_id; | |
1283 | params.max_buffers_per_cqe = start->max_buffers_per_cqe; | |
1284 | params.mtu = vf->mtu; | |
1285 | ||
1286 | rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); | |
1287 | if (rc != 0) { | |
1288 | DP_ERR(p_hwfn, | |
1289 | "qed_iov_vf_mbx_start_vport returned error %d\n", rc); | |
1290 | status = PFVF_STATUS_FAILURE; | |
1291 | } else { | |
1292 | vf->vport_instance++; | |
1293 | } | |
1294 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, | |
1295 | sizeof(struct pfvf_def_resp_tlv), status); | |
1296 | } | |
1297 | ||
1298 | static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, | |
1299 | struct qed_ptt *p_ptt, | |
1300 | struct qed_vf_info *vf) | |
1301 | { | |
1302 | u8 status = PFVF_STATUS_SUCCESS; | |
1303 | int rc; | |
1304 | ||
1305 | vf->vport_instance--; | |
1306 | ||
1307 | rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); | |
1308 | if (rc != 0) { | |
1309 | DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", | |
1310 | rc); | |
1311 | status = PFVF_STATUS_FAILURE; | |
1312 | } | |
1313 | ||
1314 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, | |
1315 | sizeof(struct pfvf_def_resp_tlv), status); | |
1316 | } | |
1317 | ||
1318 | #define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A | |
1319 | #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ | |
1320 | (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) | |
1321 | ||
1322 | static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, | |
1323 | struct qed_ptt *p_ptt, | |
1324 | struct qed_vf_info *vf, u8 status) | |
1325 | { | |
1326 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1327 | struct pfvf_start_queue_resp_tlv *p_tlv; | |
1328 | struct vfpf_start_rxq_tlv *req; | |
1329 | ||
1330 | mbx->offset = (u8 *)mbx->reply_virt; | |
1331 | ||
1332 | p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, | |
1333 | sizeof(*p_tlv)); | |
1334 | qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, | |
1335 | sizeof(struct channel_list_end_tlv)); | |
1336 | ||
1337 | /* Update the TLV with the response */ | |
1338 | if (status == PFVF_STATUS_SUCCESS) { | |
1339 | u16 hw_qid = 0; | |
1340 | ||
1341 | req = &mbx->req_virt->start_rxq; | |
1342 | qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid, | |
1343 | &hw_qid); | |
1344 | ||
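		/* BAR0 offset at which this queue's Rx producers live; the VF
		 * uses it to post Rx producer updates for the new queue.
		 */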
1345 | p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) + | |
1346 | hw_qid * MSTORM_QZONE_SIZE + | |
1347 | offsetof(struct mstorm_eth_queue_zone, | |
1348 | rx_producers); | |
1349 | } | |
1350 | ||
1351 | qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status); | |
1352 | } | |
1353 | ||
1354 | static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, | |
1355 | struct qed_ptt *p_ptt, | |
1356 | struct qed_vf_info *vf) | |
1357 | { | |
1358 | struct qed_queue_start_common_params params; | |
1359 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1360 | u8 status = PFVF_STATUS_SUCCESS; | |
1361 | struct vfpf_start_rxq_tlv *req; | |
1362 | int rc; | |
1363 | ||
1364 | memset(¶ms, 0, sizeof(params)); | |
1365 | req = &mbx->req_virt->start_rxq; | |
1366 | params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid; | |
1367 | params.vport_id = vf->vport_id; | |
1368 | params.sb = req->hw_sb; | |
1369 | params.sb_idx = req->sb_index; | |
1370 | ||
1371 | rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid, | |
1372 | vf->vf_queues[req->rx_qid].fw_cid, | |
1373 | ¶ms, | |
1374 | vf->abs_vf_id + 0x10, | |
1375 | req->bd_max_bytes, | |
1376 | req->rxq_addr, | |
1377 | req->cqe_pbl_addr, req->cqe_pbl_size); | |
1378 | ||
1379 | if (rc) { | |
1380 | status = PFVF_STATUS_FAILURE; | |
1381 | } else { | |
1382 | vf->vf_queues[req->rx_qid].rxq_active = true; | |
1383 | vf->num_active_rxqs++; | |
1384 | } | |
1385 | ||
1386 | qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status); | |
1387 | } | |
1388 | ||
1389 | static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, | |
1390 | struct qed_ptt *p_ptt, | |
1391 | struct qed_vf_info *vf) | |
1392 | { | |
1393 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
1394 | struct qed_queue_start_common_params params; | |
1395 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1396 | union qed_qm_pq_params pq_params; | |
1397 | u8 status = PFVF_STATUS_SUCCESS; | |
1398 | struct vfpf_start_txq_tlv *req; | |
1399 | int rc; | |
1400 | ||
1401 | /* Prepare the parameters which would choose the right PQ */ | |
1402 | memset(&pq_params, 0, sizeof(pq_params)); | |
1403 | pq_params.eth.is_vf = 1; | |
1404 | pq_params.eth.vf_id = vf->relative_vf_id; | |
1405 | ||
1406 | memset(¶ms, 0, sizeof(params)); | |
1407 | req = &mbx->req_virt->start_txq; | |
1408 | params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid; | |
1409 | params.vport_id = vf->vport_id; | |
1410 | params.sb = req->hw_sb; | |
1411 | params.sb_idx = req->sb_index; | |
1412 | ||
1413 | rc = qed_sp_eth_txq_start_ramrod(p_hwfn, | |
1414 | vf->opaque_fid, | |
1415 | vf->vf_queues[req->tx_qid].fw_cid, | |
1416 | ¶ms, | |
1417 | vf->abs_vf_id + 0x10, | |
1418 | req->pbl_addr, | |
1419 | req->pbl_size, &pq_params); | |
1420 | ||
1421 | if (rc) | |
1422 | status = PFVF_STATUS_FAILURE; | |
1423 | else | |
1424 | vf->vf_queues[req->tx_qid].txq_active = true; | |
1425 | ||
1426 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ, | |
1427 | length, status); | |
1428 | } | |
1429 | ||
1430 | static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, | |
1431 | struct qed_vf_info *vf, | |
1432 | u16 rxq_id, u8 num_rxqs, bool cqe_completion) | |
1433 | { | |
1434 | int rc = 0; | |
1435 | int qid; | |
1436 | ||
1437 | if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues)) | |
1438 | return -EINVAL; | |
1439 | ||
1440 | for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) { | |
1441 | if (vf->vf_queues[qid].rxq_active) { | |
1442 | rc = qed_sp_eth_rx_queue_stop(p_hwfn, | |
1443 | vf->vf_queues[qid]. | |
1444 | fw_rx_qid, false, | |
1445 | cqe_completion); | |
1446 | ||
1447 | if (rc) | |
1448 | return rc; | |
1449 | } | |
1450 | vf->vf_queues[qid].rxq_active = false; | |
1451 | vf->num_active_rxqs--; | |
1452 | } | |
1453 | ||
1454 | return rc; | |
1455 | } | |
1456 | ||
1457 | static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, | |
1458 | struct qed_vf_info *vf, u16 txq_id, u8 num_txqs) | |
1459 | { | |
1460 | int rc = 0; | |
1461 | int qid; | |
1462 | ||
1463 | if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues)) | |
1464 | return -EINVAL; | |
1465 | ||
1466 | for (qid = txq_id; qid < txq_id + num_txqs; qid++) { | |
1467 | if (vf->vf_queues[qid].txq_active) { | |
1468 | rc = qed_sp_eth_tx_queue_stop(p_hwfn, | |
1469 | vf->vf_queues[qid]. | |
1470 | fw_tx_qid); | |
1471 | ||
1472 | if (rc) | |
1473 | return rc; | |
1474 | } | |
1475 | vf->vf_queues[qid].txq_active = false; | |
1476 | } | |
1477 | return rc; | |
1478 | } | |
1479 | ||
1480 | static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, | |
1481 | struct qed_ptt *p_ptt, | |
1482 | struct qed_vf_info *vf) | |
1483 | { | |
1484 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
1485 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1486 | u8 status = PFVF_STATUS_SUCCESS; | |
1487 | struct vfpf_stop_rxqs_tlv *req; | |
1488 | int rc; | |
1489 | ||
	/* We give the option of starting from qid != 0; in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * number of queues that exist.
	 */
1494 | req = &mbx->req_virt->stop_rxqs; | |
1495 | rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, | |
1496 | req->num_rxqs, req->cqe_completion); | |
1497 | if (rc) | |
1498 | status = PFVF_STATUS_FAILURE; | |
1499 | ||
1500 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, | |
1501 | length, status); | |
1502 | } | |
1503 | ||
1504 | static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, | |
1505 | struct qed_ptt *p_ptt, | |
1506 | struct qed_vf_info *vf) | |
1507 | { | |
1508 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
1509 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1510 | u8 status = PFVF_STATUS_SUCCESS; | |
1511 | struct vfpf_stop_txqs_tlv *req; | |
1512 | int rc; | |
1513 | ||
	/* We give the option of starting from qid != 0; in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * number of queues that exist.
	 */
1518 | req = &mbx->req_virt->stop_txqs; | |
1519 | rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs); | |
1520 | if (rc) | |
1521 | status = PFVF_STATUS_FAILURE; | |
1522 | ||
1523 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, | |
1524 | length, status); | |
1525 | } | |
1526 | ||
1527 | static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, |
1528 | struct qed_ptt *p_ptt, | |
1529 | struct qed_vf_info *vf) | |
1530 | { | |
1531 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
1532 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1533 | struct vfpf_update_rxq_tlv *req; | |
1534 | u8 status = PFVF_STATUS_SUCCESS; | |
1535 | u8 complete_event_flg; | |
1536 | u8 complete_cqe_flg; | |
1537 | u16 qid; | |
1538 | int rc; | |
1539 | u8 i; | |
1540 | ||
1541 | req = &mbx->req_virt->update_rxq; | |
1542 | complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); | |
1543 | complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); | |
1544 | ||
1545 | for (i = 0; i < req->num_rxqs; i++) { | |
1546 | qid = req->rx_qid + i; | |
1547 | ||
1548 | if (!vf->vf_queues[qid].rxq_active) { | |
			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
1550 | qid); | |
1551 | status = PFVF_STATUS_FAILURE; | |
1552 | break; | |
1553 | } | |
1554 | ||
1555 | rc = qed_sp_eth_rx_queues_update(p_hwfn, | |
1556 | vf->vf_queues[qid].fw_rx_qid, | |
1557 | 1, | |
1558 | complete_cqe_flg, | |
1559 | complete_event_flg, | |
1560 | QED_SPQ_MODE_EBLOCK, NULL); | |
1561 | ||
1562 | if (rc) { | |
1563 | status = PFVF_STATUS_FAILURE; | |
1564 | break; | |
1565 | } | |
1566 | } | |
1567 | ||
1568 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, | |
1569 | length, status); | |
1570 | } | |
1571 | ||
1572 | void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, |
1573 | void *p_tlvs_list, u16 req_type) | |
1574 | { | |
1575 | struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; | |
1576 | int len = 0; | |
1577 | ||
1578 | do { | |
1579 | if (!p_tlv->length) { | |
1580 | DP_NOTICE(p_hwfn, "Zero length TLV found\n"); | |
1581 | return NULL; | |
1582 | } | |
1583 | ||
1584 | if (p_tlv->type == req_type) { | |
1585 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
1586 | "Extended tlv type %d, length %d found\n", | |
1587 | p_tlv->type, p_tlv->length); | |
1588 | return p_tlv; | |
1589 | } | |
1590 | ||
1591 | len += p_tlv->length; | |
1592 | p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); | |
1593 | ||
1594 | if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { | |
1595 | DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n"); | |
1596 | return NULL; | |
1597 | } | |
1598 | } while (p_tlv->type != CHANNEL_TLV_LIST_END); | |
1599 | ||
1600 | return NULL; | |
1601 | } | |
1602 | ||
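/* Each qed_iov_vp_update_*_param() helper below looks up a single extended
 * TLV in the VF's VPORT_UPDATE request, copies its fields into the
 * qed_sp_vport_update_params that will feed the ramrod, and sets the
 * matching QED_IOV_VP_UPDATE_* bit in tlvs_mask so the response can report
 * which features were recognized.
 */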
1603 | static void | |
1604 | qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, | |
1605 | struct qed_sp_vport_update_params *p_data, | |
1606 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
1607 | { | |
1608 | struct vfpf_vport_update_activate_tlv *p_act_tlv; | |
1609 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; | |
1610 | ||
1611 | p_act_tlv = (struct vfpf_vport_update_activate_tlv *) | |
1612 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
1613 | if (!p_act_tlv) | |
1614 | return; | |
1615 | ||
1616 | p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; | |
1617 | p_data->vport_active_rx_flg = p_act_tlv->active_rx; | |
1618 | p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; | |
1619 | p_data->vport_active_tx_flg = p_act_tlv->active_tx; | |
1620 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; | |
1621 | } | |
1622 | ||
17b235c1 YM |
1623 | static void |
1624 | qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, | |
1625 | struct qed_sp_vport_update_params *p_data, | |
1626 | struct qed_vf_info *p_vf, | |
1627 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
1628 | { | |
1629 | struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; | |
1630 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; | |
1631 | ||
1632 | p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) | |
1633 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
1634 | if (!p_vlan_tlv) | |
1635 | return; | |
1636 | ||
1637 | p_data->update_inner_vlan_removal_flg = 1; | |
1638 | p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; | |
1639 | ||
1640 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; | |
1641 | } | |
1642 | ||
1643 | static void | |
1644 | qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, | |
1645 | struct qed_sp_vport_update_params *p_data, | |
1646 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
1647 | { | |
1648 | struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; | |
1649 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; | |
1650 | ||
1651 | p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) | |
1652 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, | |
1653 | tlv); | |
1654 | if (!p_tx_switch_tlv) | |
1655 | return; | |
1656 | ||
1657 | p_data->update_tx_switching_flg = 1; | |
1658 | p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; | |
1659 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; | |
1660 | } | |
1661 | ||
dacd88d6 YM |
1662 | static void |
1663 | qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, | |
1664 | struct qed_sp_vport_update_params *p_data, | |
1665 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
1666 | { | |
1667 | struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; | |
1668 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; | |
1669 | ||
1670 | p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) | |
1671 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
1672 | if (!p_mcast_tlv) | |
1673 | return; | |
1674 | ||
1675 | p_data->update_approx_mcast_flg = 1; | |
1676 | memcpy(p_data->bins, p_mcast_tlv->bins, | |
1677 | sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); | |
1678 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; | |
1679 | } | |
1680 | ||
1681 | static void | |
1682 | qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, | |
1683 | struct qed_sp_vport_update_params *p_data, | |
1684 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
1685 | { | |
1686 | struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; | |
1687 | struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; | |
1688 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; | |
1689 | ||
1690 | p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) | |
1691 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
1692 | if (!p_accept_tlv) | |
1693 | return; | |
1694 | ||
1695 | p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; | |
1696 | p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; | |
1697 | p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; | |
1698 | p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; | |
1699 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; | |
1700 | } | |
1701 | ||
17b235c1 YM |
1702 | static void |
1703 | qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, | |
1704 | struct qed_sp_vport_update_params *p_data, | |
1705 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
1706 | { | |
1707 | struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; | |
1708 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; | |
1709 | ||
1710 | p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) | |
1711 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, | |
1712 | tlv); | |
1713 | if (!p_accept_any_vlan) | |
1714 | return; | |
1715 | ||
1716 | p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; | |
1717 | p_data->update_accept_any_vlan_flg = | |
1718 | p_accept_any_vlan->update_accept_any_vlan_flg; | |
1719 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; | |
1720 | } | |
1721 | ||
dacd88d6 YM |
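/* The RSS indirection table arrives from the VF in VF-relative rx queue
 * indices; translate each entry to the matching fw_rx_qid. Entries that are
 * out of range or refer to an inactive queue are logged and fall back to
 * vf_queues[0].fw_rx_qid. As an illustrative example, a VF whose queue 2
 * maps to fw_rx_qid 18 has an indirection entry of 2 rewritten to 18.
 * Each VF gets its own RSS engine, relative_vf_id + 1.
 */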
1722 | static void |
1723 | qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, | |
1724 | struct qed_vf_info *vf, | |
1725 | struct qed_sp_vport_update_params *p_data, | |
1726 | struct qed_rss_params *p_rss, | |
1727 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
1728 | { | |
1729 | struct vfpf_vport_update_rss_tlv *p_rss_tlv; | |
1730 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; | |
1731 | u16 i, q_idx, max_q_idx; | |
1732 | u16 table_size; | |
1733 | ||
1734 | p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) | |
1735 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
1736 | if (!p_rss_tlv) { | |
1737 | p_data->rss_params = NULL; | |
1738 | return; | |
1739 | } | |
1740 | ||
1741 | memset(p_rss, 0, sizeof(struct qed_rss_params)); | |
1742 | ||
1743 | p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & | |
1744 | VFPF_UPDATE_RSS_CONFIG_FLAG); | |
1745 | p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & | |
1746 | VFPF_UPDATE_RSS_CAPS_FLAG); | |
1747 | p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & | |
1748 | VFPF_UPDATE_RSS_IND_TABLE_FLAG); | |
1749 | p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & | |
1750 | VFPF_UPDATE_RSS_KEY_FLAG); | |
1751 | ||
1752 | p_rss->rss_enable = p_rss_tlv->rss_enable; | |
1753 | p_rss->rss_eng_id = vf->relative_vf_id + 1; | |
1754 | p_rss->rss_caps = p_rss_tlv->rss_caps; | |
1755 | p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; | |
1756 | memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table, | |
1757 | sizeof(p_rss->rss_ind_table)); | |
1758 | memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); | |
1759 | ||
1760 | table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), | |
1761 | (1 << p_rss_tlv->rss_table_size_log)); | |
1762 | ||
1763 | max_q_idx = ARRAY_SIZE(vf->vf_queues); | |
1764 | ||
1765 | for (i = 0; i < table_size; i++) { | |
1766 | u16 index = vf->vf_queues[0].fw_rx_qid; | |
1767 | ||
1768 | q_idx = p_rss->rss_ind_table[i]; | |
1769 | if (q_idx >= max_q_idx) | |
1770 | DP_NOTICE(p_hwfn, | |
1771 | "rss_ind_table[%d] = %d, rxq is out of range\n", | |
1772 | i, q_idx); | |
1773 | else if (!vf->vf_queues[q_idx].rxq_active) | |
1774 | DP_NOTICE(p_hwfn, | |
1775 | "rss_ind_table[%d] = %d, rxq is not active\n", | |
1776 | i, q_idx); | |
1777 | else | |
1778 | index = vf->vf_queues[q_idx].fw_rx_qid; | |
1779 | p_rss->rss_ind_table[i] = index; | |
1780 | } | |
1781 | ||
1782 | p_data->rss_params = p_rss; | |
1783 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; | |
1784 | } | |
1785 | ||
17b235c1 YM |
1786 | static void |
1787 | qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, | |
1788 | struct qed_vf_info *vf, | |
1789 | struct qed_sp_vport_update_params *p_data, | |
1790 | struct qed_sge_tpa_params *p_sge_tpa, | |
1791 | struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) | |
1792 | { | |
1793 | struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; | |
1794 | u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; | |
1795 | ||
1796 | p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) | |
1797 | qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); | |
1798 | ||
1799 | if (!p_sge_tpa_tlv) { | |
1800 | p_data->sge_tpa_params = NULL; | |
1801 | return; | |
1802 | } | |
1803 | ||
1804 | memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); | |
1805 | ||
1806 | p_sge_tpa->update_tpa_en_flg = | |
1807 | !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); | |
1808 | p_sge_tpa->update_tpa_param_flg = | |
1809 | !!(p_sge_tpa_tlv->update_sge_tpa_flags & | |
1810 | VFPF_UPDATE_TPA_PARAM_FLAG); | |
1811 | ||
1812 | p_sge_tpa->tpa_ipv4_en_flg = | |
1813 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); | |
1814 | p_sge_tpa->tpa_ipv6_en_flg = | |
1815 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); | |
1816 | p_sge_tpa->tpa_pkt_split_flg = | |
1817 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); | |
1818 | p_sge_tpa->tpa_hdr_data_split_flg = | |
1819 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); | |
1820 | p_sge_tpa->tpa_gro_consistent_flg = | |
1821 | !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); | |
1822 | ||
1823 | p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; | |
1824 | p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; | |
1825 | p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; | |
1826 | p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; | |
1827 | p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; | |
1828 | ||
1829 | p_data->sge_tpa_params = p_sge_tpa; | |
1830 | ||
1831 | *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; | |
1832 | } | |
1833 | ||
dacd88d6 YM |
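/* Top-level VPORT_UPDATE handler: gather every extended TLV the VF supplied
 * into a single qed_sp_vport_update() request. tlvs_mask records which
 * extended TLVs were found and is echoed back to the VF in the response so
 * it can tell which of its requests the PF actually processed.
 */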
1834 | static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, |
1835 | struct qed_ptt *p_ptt, | |
1836 | struct qed_vf_info *vf) | |
1837 | { | |
1838 | struct qed_sp_vport_update_params params; | |
1839 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
17b235c1 | 1840 | struct qed_sge_tpa_params sge_tpa_params; |
dacd88d6 YM |
1841 | struct qed_rss_params rss_params; |
1842 | u8 status = PFVF_STATUS_SUCCESS; | |
1843 | u16 tlvs_mask = 0; | |
1844 | u16 length; | |
1845 | int rc; | |
1846 | ||
1847 | memset(¶ms, 0, sizeof(params)); | |
1848 | params.opaque_fid = vf->opaque_fid; | |
1849 | params.vport_id = vf->vport_id; | |
1850 | params.rss_params = NULL; | |
1851 | ||
1852 | /* Search the request for extended TLVs and update the values | |
1853 | * from the VF in struct qed_sp_vport_update_params. | |
1854 | */ | |
1855 | qed_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask); | |
17b235c1 YM |
1856 | qed_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask); |
1857 | qed_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask); | |
dacd88d6 YM |
1858 | qed_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask); |
1859 | qed_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask); | |
1860 | qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, &rss_params, | |
1861 | mbx, &tlvs_mask); | |
17b235c1 YM |
1862 | qed_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask); |
1863 | qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms, | |
1864 | &sge_tpa_params, mbx, &tlvs_mask); | |
dacd88d6 YM |
1865 | |
1866 | /* Just log a message if no extended TLV was found in the buffer. | |
1867 | * Once every feature of the vport update ramrod is requested by the | |
1868 | * VF as an extended TLV, an error can be returned in the response | |
1869 | * instead when no extended TLV is present in the buffer. | |
1870 | */ | |
1871 | if (!tlvs_mask) { | |
1872 | DP_NOTICE(p_hwfn, | |
1873 | "No feature tlvs found for vport update\n"); | |
1874 | status = PFVF_STATUS_NOT_SUPPORTED; | |
1875 | goto out; | |
1876 | } | |
1877 | ||
1878 | rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); | |
1879 | ||
1880 | if (rc) | |
1881 | status = PFVF_STATUS_FAILURE; | |
1882 | ||
1883 | out: | |
1884 | length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, | |
1885 | tlvs_mask, tlvs_mask); | |
1886 | qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); | |
1887 | } | |
1888 | ||
1889 | int qed_iov_chk_ucast(struct qed_hwfn *hwfn, | |
1890 | int vfid, struct qed_filter_ucast *params) | |
1891 | { | |
1892 | struct qed_public_vf_info *vf; | |
1893 | ||
1894 | vf = qed_iov_get_public_vf_info(hwfn, vfid, true); | |
1895 | if (!vf) | |
1896 | return -EINVAL; | |
1897 | ||
1898 | /* No real decision to make; store the configured MAC */ | |
1899 | if (params->type == QED_FILTER_MAC || | |
1900 | params->type == QED_FILTER_MAC_VLAN) | |
1901 | ether_addr_copy(vf->mac, params->mac); | |
1902 | ||
1903 | return 0; | |
1904 | } | |
1905 | ||
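/* Handle CHANNEL_TLV_UCAST_FILTER: translate the VF request into a
 * qed_filter_ucast applied to both RX and TX on the VF's own vport, let
 * qed_iov_chk_ucast() record the configured MAC in the public VF info, and
 * post the filter ramrod with the VF's opaque FID. Any failure is reported
 * back as PFVF_STATUS_FAILURE.
 */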
1906 | static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, | |
1907 | struct qed_ptt *p_ptt, | |
1908 | struct qed_vf_info *vf) | |
1909 | { | |
1910 | struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; | |
1911 | struct vfpf_ucast_filter_tlv *req; | |
1912 | u8 status = PFVF_STATUS_SUCCESS; | |
1913 | struct qed_filter_ucast params; | |
1914 | int rc; | |
1915 | ||
1916 | /* Prepare the unicast filter params */ | |
1917 | memset(¶ms, 0, sizeof(struct qed_filter_ucast)); | |
1918 | req = &mbx->req_virt->ucast_filter; | |
1919 | params.opcode = (enum qed_filter_opcode)req->opcode; | |
1920 | params.type = (enum qed_filter_ucast_type)req->type; | |
1921 | ||
1922 | params.is_rx_filter = 1; | |
1923 | params.is_tx_filter = 1; | |
1924 | params.vport_to_remove_from = vf->vport_id; | |
1925 | params.vport_to_add_to = vf->vport_id; | |
1926 | memcpy(params.mac, req->mac, ETH_ALEN); | |
1927 | params.vlan = req->vlan; | |
1928 | ||
1929 | DP_VERBOSE(p_hwfn, | |
1930 | QED_MSG_IOV, | |
1931 | "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", | |
1932 | vf->abs_vf_id, params.opcode, params.type, | |
1933 | params.is_rx_filter ? "RX" : "", | |
1934 | params.is_tx_filter ? "TX" : "", | |
1935 | params.vport_to_add_to, | |
1936 | params.mac[0], params.mac[1], | |
1937 | params.mac[2], params.mac[3], | |
1938 | params.mac[4], params.mac[5], params.vlan); | |
1939 | ||
1940 | if (!vf->vport_instance) { | |
1941 | DP_VERBOSE(p_hwfn, | |
1942 | QED_MSG_IOV, | |
1943 | "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", | |
1944 | vf->abs_vf_id); | |
1945 | status = PFVF_STATUS_FAILURE; | |
1946 | goto out; | |
1947 | } | |
1948 | ||
1949 | rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); | |
1950 | if (rc) { | |
1951 | status = PFVF_STATUS_FAILURE; | |
1952 | goto out; | |
1953 | } | |
1954 | ||
1955 | rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, | |
1956 | QED_SPQ_MODE_CB, NULL); | |
1957 | if (rc) | |
1958 | status = PFVF_STATUS_FAILURE; | |
1959 | ||
1960 | out: | |
1961 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, | |
1962 | sizeof(struct pfvf_def_resp_tlv), status); | |
1963 | } | |
1964 | ||
0b55e27d YM |
1965 | static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, |
1966 | struct qed_ptt *p_ptt, | |
1967 | struct qed_vf_info *vf) | |
1968 | { | |
1969 | int i; | |
1970 | ||
1971 | /* Reset the SBs */ | |
1972 | for (i = 0; i < vf->num_sbs; i++) | |
1973 | qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, | |
1974 | vf->igu_sbs[i], | |
1975 | vf->opaque_fid, false); | |
1976 | ||
1977 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, | |
1978 | sizeof(struct pfvf_def_resp_tlv), | |
1979 | PFVF_STATUS_SUCCESS); | |
1980 | } | |
1981 | ||
1982 | static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, | |
1983 | struct qed_ptt *p_ptt, struct qed_vf_info *vf) | |
1984 | { | |
1985 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
1986 | u8 status = PFVF_STATUS_SUCCESS; | |
1987 | ||
1988 | /* Disable Interrupts for VF */ | |
1989 | qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); | |
1990 | ||
1991 | /* Reset Permission table */ | |
1992 | qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); | |
1993 | ||
1994 | qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, | |
1995 | length, status); | |
1996 | } | |
1997 | ||
1998 | static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, | |
1999 | struct qed_ptt *p_ptt, | |
2000 | struct qed_vf_info *p_vf) | |
2001 | { | |
2002 | u16 length = sizeof(struct pfvf_def_resp_tlv); | |
2003 | ||
2004 | qed_iov_vf_cleanup(p_hwfn, p_vf); | |
2005 | ||
2006 | qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, | |
2007 | length, PFVF_STATUS_SUCCESS); | |
2008 | } | |
2009 | ||
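/* FLR cleanup, step 1: pretend to be the VF and poll the DORQ VF usage
 * counter until it drains. With 50 iterations of msleep(20) this allows
 * roughly one second before giving up with -EBUSY.
 */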
2010 | static int | |
2011 | qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, | |
2012 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) | |
2013 | { | |
2014 | int cnt; | |
2015 | u32 val; | |
2016 | ||
2017 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); | |
2018 | ||
2019 | for (cnt = 0; cnt < 50; cnt++) { | |
2020 | val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); | |
2021 | if (!val) | |
2022 | break; | |
2023 | msleep(20); | |
2024 | } | |
2025 | qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); | |
2026 | ||
2027 | if (cnt == 50) { | |
2028 | DP_ERR(p_hwfn, | |
2029 | "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", | |
2030 | p_vf->abs_vf_id, val); | |
2031 | return -EBUSY; | |
2032 | } | |
2033 | ||
2034 | return 0; | |
2035 | } | |
2036 | ||
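/* FLR cleanup, step 2: snapshot the per-VOQ producer/consumer counters in
 * the PBF and wait until every consumer has advanced by at least the
 * initial producer-consumer distance, i.e. all blocks that were queued at
 * the time of the snapshot have been sent. Same ~1 second budget as the
 * DORQ poll above.
 */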
2037 | static int | |
2038 | qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, | |
2039 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) | |
2040 | { | |
2041 | u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS]; | |
2042 | int i, cnt; | |
2043 | ||
2044 | /* Read initial consumers & producers */ | |
2045 | for (i = 0; i < MAX_NUM_VOQS; i++) { | |
2046 | u32 prod; | |
2047 | ||
2048 | cons[i] = qed_rd(p_hwfn, p_ptt, | |
2049 | PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + | |
2050 | i * 0x40); | |
2051 | prod = qed_rd(p_hwfn, p_ptt, | |
2052 | PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + | |
2053 | i * 0x40); | |
2054 | distance[i] = prod - cons[i]; | |
2055 | } | |
2056 | ||
2057 | /* Wait for consumers to pass the producers */ | |
2058 | i = 0; | |
2059 | for (cnt = 0; cnt < 50; cnt++) { | |
2060 | for (; i < MAX_NUM_VOQS; i++) { | |
2061 | u32 tmp; | |
2062 | ||
2063 | tmp = qed_rd(p_hwfn, p_ptt, | |
2064 | PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + | |
2065 | i * 0x40); | |
2066 | if (distance[i] > tmp - cons[i]) | |
2067 | break; | |
2068 | } | |
2069 | ||
2070 | if (i == MAX_NUM_VOQS) | |
2071 | break; | |
2072 | ||
2073 | msleep(20); | |
2074 | } | |
2075 | ||
2076 | if (cnt == 50) { | |
2077 | DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", | |
2078 | p_vf->abs_vf_id, i); | |
2079 | return -EBUSY; | |
2080 | } | |
2081 | ||
2082 | return 0; | |
2083 | } | |
2084 | ||
2085 | static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, | |
2086 | struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) | |
2087 | { | |
2088 | int rc; | |
2089 | ||
2090 | rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); | |
2091 | if (rc) | |
2092 | return rc; | |
2093 | ||
2094 | rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); | |
2095 | if (rc) | |
2096 | return rc; | |
2097 | ||
2098 | return 0; | |
2099 | } | |
2100 | ||
2101 | static int | |
2102 | qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, | |
2103 | struct qed_ptt *p_ptt, | |
2104 | u16 rel_vf_id, u32 *ack_vfs) | |
2105 | { | |
2106 | struct qed_vf_info *p_vf; | |
2107 | int rc = 0; | |
2108 | ||
2109 | p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); | |
2110 | if (!p_vf) | |
2111 | return 0; | |
2112 | ||
2113 | if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & | |
2114 | (1ULL << (rel_vf_id % 64))) { | |
2115 | u16 vfid = p_vf->abs_vf_id; | |
2116 | ||
2117 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2118 | "VF[%d] - Handling FLR\n", vfid); | |
2119 | ||
2120 | qed_iov_vf_cleanup(p_hwfn, p_vf); | |
2121 | ||
2122 | /* If VF isn't active, no need for anything but SW */ | |
2123 | if (!p_vf->b_init) | |
2124 | goto cleanup; | |
2125 | ||
2126 | rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); | |
2127 | if (rc) | |
2128 | goto cleanup; | |
2129 | ||
2130 | rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); | |
2131 | if (rc) { | |
2132 | DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid); | |
2133 | return rc; | |
2134 | } | |
2135 | ||
2136 | /* VF_STOPPED has to be set only after final cleanup | |
2137 | * but prior to re-enabling the VF. | |
2138 | */ | |
2139 | p_vf->state = VF_STOPPED; | |
2140 | ||
2141 | rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); | |
2142 | if (rc) { | |
2143 | DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n", | |
2144 | vfid); | |
2145 | return rc; | |
2146 | } | |
2147 | cleanup: | |
2148 | /* Mark VF for ack and clean pending state */ | |
2149 | if (p_vf->state == VF_RESET) | |
2150 | p_vf->state = VF_STOPPED; | |
2151 | ack_vfs[vfid / 32] |= (1 << (vfid % 32)); | |
2152 | p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= | |
2153 | ~(1ULL << (rel_vf_id % 64)); | |
2154 | p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= | |
2155 | ~(1ULL << (rel_vf_id % 64)); | |
2156 | } | |
2157 | ||
2158 | return rc; | |
2159 | } | |
2160 | ||
2161 | int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |
2162 | { | |
2163 | u32 ack_vfs[VF_MAX_STATIC / 32]; | |
2164 | int rc = 0; | |
2165 | u16 i; | |
2166 | ||
2167 | memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); | |
2168 | ||
2169 | /* Since BRB <-> PRS interface can't be tested as part of the flr | |
2170 | * polling due to HW limitations, simply sleep a bit. And since | |
2171 | * there's no need to wait per-vf, do it before looping. | |
2172 | */ | |
2173 | msleep(100); | |
2174 | ||
2175 | for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) | |
2176 | qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); | |
2177 | ||
2178 | rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); | |
2179 | return rc; | |
2180 | } | |
2181 | ||
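/* Mark VFs reported by the MFW as FLR-ed. Note the two different bitmap
 * layouts involved: p_disabled_vfs is a u32 array indexed by absolute VF id
 * (vfid / 32, bit vfid % 32), while pending_flr is a u64 array indexed by
 * the VF id relative to this PF (rel_vf_id / 64, bit rel_vf_id % 64).
 * For example, absolute VF 37 is bit 5 of p_disabled_vfs[1].
 */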
2182 | int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) | |
2183 | { | |
2184 | u16 i, found = 0; | |
2185 | ||
2186 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); | |
2187 | for (i = 0; i < (VF_MAX_STATIC / 32); i++) | |
2188 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2189 | "[%08x,...,%08x]: %08x\n", | |
2190 | i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); | |
2191 | ||
2192 | if (!p_hwfn->cdev->p_iov_info) { | |
2193 | DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); | |
2194 | return 0; | |
2195 | } | |
2196 | ||
2197 | /* Mark VFs */ | |
2198 | for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { | |
2199 | struct qed_vf_info *p_vf; | |
2200 | u8 vfid; | |
2201 | ||
2202 | p_vf = qed_iov_get_vf_info(p_hwfn, i, false); | |
2203 | if (!p_vf) | |
2204 | continue; | |
2205 | ||
2206 | vfid = p_vf->abs_vf_id; | |
2207 | if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) { | |
2208 | u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; | |
2209 | u16 rel_vf_id = p_vf->relative_vf_id; | |
2210 | ||
2211 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2212 | "VF[%d] [rel %d] got FLR-ed\n", | |
2213 | vfid, rel_vf_id); | |
2214 | ||
2215 | p_vf->state = VF_RESET; | |
2216 | ||
2217 | /* No need to lock here, since pending_flr should | |
2218 | * only change here and before ACKing the MFW. Since | |
2219 | * the MFW will not trigger an additional attention | |
2220 | * for VF FLR until the ACK, we're safe. | |
2221 | */ | |
2222 | p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); | |
2223 | found = 1; | |
2224 | } | |
2225 | } | |
2226 | ||
2227 | return found; | |
2228 | } | |
2229 | ||
36558c3d YM |
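/* Mirror the PF's link parameters, state and capabilities into the VF's
 * bulletin board shadow copy. The bulletin itself is posted to the VF later
 * (see qed_handle_bulletin_post() invoking qed_iov_post_vf_bulletin()).
 */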
2230 | void qed_iov_set_link(struct qed_hwfn *p_hwfn, |
2231 | u16 vfid, | |
2232 | struct qed_mcp_link_params *params, | |
2233 | struct qed_mcp_link_state *link, | |
2234 | struct qed_mcp_link_capabilities *p_caps) | |
2235 | { | |
2236 | struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, | |
2237 | vfid, | |
2238 | false); | |
2239 | struct qed_bulletin_content *p_bulletin; | |
2240 | ||
2241 | if (!p_vf) | |
2242 | return; | |
2243 | ||
2244 | p_bulletin = p_vf->bulletin.p_virt; | |
2245 | p_bulletin->req_autoneg = params->speed.autoneg; | |
2246 | p_bulletin->req_adv_speed = params->speed.advertised_speeds; | |
2247 | p_bulletin->req_forced_speed = params->speed.forced_speed; | |
2248 | p_bulletin->req_autoneg_pause = params->pause.autoneg; | |
2249 | p_bulletin->req_forced_rx = params->pause.forced_rx; | |
2250 | p_bulletin->req_forced_tx = params->pause.forced_tx; | |
2251 | p_bulletin->req_loopback = params->loopback_mode; | |
2252 | ||
2253 | p_bulletin->link_up = link->link_up; | |
2254 | p_bulletin->speed = link->speed; | |
2255 | p_bulletin->full_duplex = link->full_duplex; | |
2256 | p_bulletin->autoneg = link->an; | |
2257 | p_bulletin->autoneg_complete = link->an_complete; | |
2258 | p_bulletin->parallel_detection = link->parallel_detection; | |
2259 | p_bulletin->pfc_enabled = link->pfc_enabled; | |
2260 | p_bulletin->partner_adv_speed = link->partner_adv_speed; | |
2261 | p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; | |
2262 | p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; | |
2263 | p_bulletin->partner_adv_pause = link->partner_adv_pause; | |
2264 | p_bulletin->sfp_tx_fault = link->sfp_tx_fault; | |
2265 | ||
2266 | p_bulletin->capability_speed = p_caps->speed_capabilities; | |
2267 | } | |
2268 | ||
37bff2b9 YM |
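/* Entry point for a VF->PF mailbox request. By the time we get here the
 * request has already been DMAed into the PF-side req_virt buffer (see
 * qed_iov_copy_vf_msg()); the type of the first TLV selects the handler,
 * and unknown types are dumped for debugging.
 */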
2269 | static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, |
2270 | struct qed_ptt *p_ptt, int vfid) | |
2271 | { | |
2272 | struct qed_iov_vf_mbx *mbx; | |
2273 | struct qed_vf_info *p_vf; | |
2274 | int i; | |
2275 | ||
2276 | p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
2277 | if (!p_vf) | |
2278 | return; | |
2279 | ||
2280 | mbx = &p_vf->vf_mbx; | |
2281 | ||
2282 | /* qed_iov_process_mbx_request */ | |
2283 | DP_VERBOSE(p_hwfn, | |
2284 | QED_MSG_IOV, | |
2285 | "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id); | |
2286 | ||
2287 | mbx->first_tlv = mbx->req_virt->first_tlv; | |
2288 | ||
2289 | /* check if tlv type is known */ | |
2290 | if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { | |
1408cc1f YM |
2291 | switch (mbx->first_tlv.tl.type) { |
2292 | case CHANNEL_TLV_ACQUIRE: | |
2293 | qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); | |
2294 | break; | |
dacd88d6 YM |
2295 | case CHANNEL_TLV_VPORT_START: |
2296 | qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); | |
2297 | break; | |
2298 | case CHANNEL_TLV_VPORT_TEARDOWN: | |
2299 | qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); | |
2300 | break; | |
2301 | case CHANNEL_TLV_START_RXQ: | |
2302 | qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); | |
2303 | break; | |
2304 | case CHANNEL_TLV_START_TXQ: | |
2305 | qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); | |
2306 | break; | |
2307 | case CHANNEL_TLV_STOP_RXQS: | |
2308 | qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); | |
2309 | break; | |
2310 | case CHANNEL_TLV_STOP_TXQS: | |
2311 | qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); | |
2312 | break; | |
17b235c1 YM |
2313 | case CHANNEL_TLV_UPDATE_RXQ: |
2314 | qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); | |
2315 | break; | |
dacd88d6 YM |
2316 | case CHANNEL_TLV_VPORT_UPDATE: |
2317 | qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); | |
2318 | break; | |
2319 | case CHANNEL_TLV_UCAST_FILTER: | |
2320 | qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); | |
2321 | break; | |
0b55e27d YM |
2322 | case CHANNEL_TLV_CLOSE: |
2323 | qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); | |
2324 | break; | |
2325 | case CHANNEL_TLV_INT_CLEANUP: | |
2326 | qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); | |
2327 | break; | |
2328 | case CHANNEL_TLV_RELEASE: | |
2329 | qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); | |
2330 | break; | |
1408cc1f | 2331 | } |
37bff2b9 YM |
2332 | } else { |
2333 | /* unknown TLV - this may belong to a VF driver from the future | |
2334 | * - a version written after this PF driver was written, which | |
2335 | * supports features unknown as of yet. Too bad since we don't | |
2336 | * support them. Or this may be because someone wrote a crappy | |
2337 | * VF driver and is sending garbage over the channel. | |
2338 | */ | |
2339 | DP_ERR(p_hwfn, | |
2340 | "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", | |
2341 | mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); | |
2342 | ||
2343 | for (i = 0; i < 20; i++) { | |
2344 | DP_VERBOSE(p_hwfn, | |
2345 | QED_MSG_IOV, | |
2346 | "%x ", | |
2347 | mbx->req_virt->tlv_buf_size.tlv_buffer[i]); | |
2348 | } | |
2349 | } | |
2350 | } | |
2351 | ||
2352 | void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid) | |
2353 | { | |
2354 | u64 add_bit = 1ULL << (vfid % 64); | |
2355 | ||
2356 | p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit; | |
2357 | } | |
2358 | ||
2359 | static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn, | |
2360 | u64 *events) | |
2361 | { | |
2362 | u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events; | |
2363 | ||
2364 | memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH); | |
2365 | memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); | |
2366 | } | |
2367 | ||
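/* Called from the EQ path when a COMMON_EVENT_VF_PF_CHANNEL event arrives.
 * We only record the guest physical address of the request (regpair hi/lo)
 * in pending_req, mark the VF in the pending-events bitmap and kick the IOV
 * workqueue; the actual copy and processing happen later in task context.
 */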
2368 | static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, | |
2369 | u16 abs_vfid, struct regpair *vf_msg) | |
2370 | { | |
2371 | u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; | |
2372 | struct qed_vf_info *p_vf; | |
2373 | ||
2374 | if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) { | |
2375 | DP_VERBOSE(p_hwfn, | |
2376 | QED_MSG_IOV, | |
2377 | "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n", | |
2378 | abs_vfid); | |
2379 | return 0; | |
2380 | } | |
2381 | p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; | |
2382 | ||
2383 | /* Record the physical address of the request so that the handler | |
2384 | * can later copy the message from it. | |
2385 | */ | |
2386 | p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; | |
2387 | ||
2388 | /* Mark the event and schedule the workqueue */ | |
2389 | qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id); | |
2390 | qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); | |
2391 | ||
2392 | return 0; | |
2393 | } | |
2394 | ||
2395 | int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, | |
2396 | u8 opcode, __le16 echo, union event_ring_data *data) | |
2397 | { | |
2398 | switch (opcode) { | |
2399 | case COMMON_EVENT_VF_PF_CHANNEL: | |
2400 | return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), | |
2401 | &data->vf_pf_channel.msg_addr); | |
2402 | default: | |
2403 | DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", | |
2404 | opcode); | |
2405 | return -EINVAL; | |
2406 | } | |
2407 | } | |
2408 | ||
32a47e72 YM |
2409 | u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) |
2410 | { | |
2411 | struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; | |
2412 | u16 i; | |
2413 | ||
2414 | if (!p_iov) | |
2415 | goto out; | |
2416 | ||
2417 | for (i = rel_vf_id; i < p_iov->total_vfs; i++) | |
2418 | if (qed_iov_is_valid_vfid(p_hwfn, i, true)) | |
2419 | return i; | |
2420 | ||
2421 | out: | |
2422 | return MAX_NUM_VFS; | |
2423 | } | |
37bff2b9 YM |
2424 | |
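/* Copy a VF's mailbox request into the PF-side buffer using DMAE. The
 * source is the guest address recorded in pending_req, interpreted in the
 * VF's address space via QED_DMAE_FLAG_VF_SRC; the transfer length is
 * sizeof(union vfpf_tlvs) / 4, on the assumption that the DMAE size
 * argument is expressed in dwords.
 */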
2425 | static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, | |
2426 | int vfid) | |
2427 | { | |
2428 | struct qed_dmae_params params; | |
2429 | struct qed_vf_info *vf_info; | |
2430 | ||
2431 | vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
2432 | if (!vf_info) | |
2433 | return -EINVAL; | |
2434 | ||
2435 | memset(¶ms, 0, sizeof(struct qed_dmae_params)); | |
2436 | params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST; | |
2437 | params.src_vfid = vf_info->abs_vf_id; | |
2438 | ||
2439 | if (qed_dmae_host2host(p_hwfn, ptt, | |
2440 | vf_info->vf_mbx.pending_req, | |
2441 | vf_info->vf_mbx.req_phys, | |
2442 | sizeof(union vfpf_tlvs) / 4, ¶ms)) { | |
2443 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | |
2444 | "Failed to copy message from VF 0x%02x\n", vfid); | |
2445 | ||
2446 | return -EIO; | |
2447 | } | |
2448 | ||
2449 | return 0; | |
2450 | } | |
2451 | ||
0b55e27d YM |
2452 | bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) |
2453 | { | |
2454 | struct qed_vf_info *p_vf_info; | |
2455 | ||
2456 | p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); | |
2457 | if (!p_vf_info) | |
2458 | return true; | |
2459 | ||
2460 | return p_vf_info->state == VF_STOPPED; | |
2461 | } | |
2462 | ||
37bff2b9 YM |
2463 | /** |
2464 | * qed_schedule_iov - schedules IOV task for VF and PF | |
2465 | * @hwfn: hardware function pointer | |
2466 | * @flag: IOV flag for VF/PF | |
2467 | */ | |
2468 | void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) | |
2469 | { | |
2470 | smp_mb__before_atomic(); | |
2471 | set_bit(flag, &hwfn->iov_task_flags); | |
2472 | smp_mb__after_atomic(); | |
2473 | DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); | |
2474 | queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); | |
2475 | } | |
2476 | ||
1408cc1f YM |
2477 | void qed_vf_start_iov_wq(struct qed_dev *cdev) |
2478 | { | |
2479 | int i; | |
2480 | ||
2481 | for_each_hwfn(cdev, i) | |
2482 | queue_delayed_work(cdev->hwfns[i].iov_wq, | |
2483 | &cdev->hwfns[i].iov_task, 0); | |
2484 | } | |
2485 | ||
0b55e27d YM |
2486 | int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) |
2487 | { | |
2488 | int i, j; | |
2489 | ||
2490 | for_each_hwfn(cdev, i) | |
2491 | if (cdev->hwfns[i].iov_wq) | |
2492 | flush_workqueue(cdev->hwfns[i].iov_wq); | |
2493 | ||
2494 | /* Mark VFs for disablement */ | |
2495 | qed_iov_set_vfs_to_disable(cdev, true); | |
2496 | ||
2497 | if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) | |
2498 | pci_disable_sriov(cdev->pdev); | |
2499 | ||
2500 | for_each_hwfn(cdev, i) { | |
2501 | struct qed_hwfn *hwfn = &cdev->hwfns[i]; | |
2502 | struct qed_ptt *ptt = qed_ptt_acquire(hwfn); | |
2503 | ||
2504 | /* Failure to acquire the ptt in 100g creates an odd error | |
2505 | * where the first engine has already released IOV. | |
2506 | */ | |
2507 | if (!ptt) { | |
2508 | DP_ERR(hwfn, "Failed to acquire ptt\n"); | |
2509 | return -EBUSY; | |
2510 | } | |
2511 | ||
2512 | qed_for_each_vf(hwfn, j) { | |
2513 | int k; | |
2514 | ||
2515 | if (!qed_iov_is_valid_vfid(hwfn, j, true)) | |
2516 | continue; | |
2517 | ||
2518 | /* Wait until VF is disabled before releasing */ | |
2519 | for (k = 0; k < 100; k++) { | |
2520 | if (!qed_iov_is_vf_stopped(hwfn, j)) | |
2521 | msleep(20); | |
2522 | else | |
2523 | break; | |
2524 | } | |
2525 | ||
2526 | if (k < 100) | |
2527 | qed_iov_release_hw_for_vf(&cdev->hwfns[i], | |
2528 | ptt, j); | |
2529 | else | |
2530 | DP_ERR(hwfn, | |
2531 | "Timeout waiting for VF's FLR to end\n"); | |
2532 | } | |
2533 | ||
2534 | qed_ptt_release(hwfn, ptt); | |
2535 | } | |
2536 | ||
2537 | qed_iov_set_vfs_to_disable(cdev, false); | |
2538 | ||
2539 | return 0; | |
2540 | } | |
2541 | ||
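/* Enable SR-IOV: per engine, the free status blocks (capped at limit = 16)
 * are split evenly between the requested VFs, so each VF is initialized
 * with num_sbs / num SBs - e.g. 16 free SBs and 4 VFs give 4 SBs per VF
 * (illustrative numbers). Only then are the PCIe VFs actually enabled.
 */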
2542 | static int qed_sriov_enable(struct qed_dev *cdev, int num) | |
2543 | { | |
2544 | struct qed_sb_cnt_info sb_cnt_info; | |
2545 | int i, j, rc; | |
2546 | ||
2547 | if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { | |
2548 | DP_NOTICE(cdev, "Can start at most %d VFs\n", | |
2549 | RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); | |
2550 | return -EINVAL; | |
2551 | } | |
2552 | ||
2553 | /* Initialize HW for VF access */ | |
2554 | for_each_hwfn(cdev, j) { | |
2555 | struct qed_hwfn *hwfn = &cdev->hwfns[j]; | |
2556 | struct qed_ptt *ptt = qed_ptt_acquire(hwfn); | |
2557 | int num_sbs = 0, limit = 16; | |
2558 | ||
2559 | if (!ptt) { | |
2560 | DP_ERR(hwfn, "Failed to acquire ptt\n"); | |
2561 | rc = -EBUSY; | |
2562 | goto err; | |
2563 | } | |
2564 | ||
2565 | memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); | |
2566 | qed_int_get_num_sbs(hwfn, &sb_cnt_info); | |
2567 | num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit); | |
2568 | ||
2569 | for (i = 0; i < num; i++) { | |
2570 | if (!qed_iov_is_valid_vfid(hwfn, i, false)) | |
2571 | continue; | |
2572 | ||
2573 | rc = qed_iov_init_hw_for_vf(hwfn, | |
2574 | ptt, i, num_sbs / num); | |
2575 | if (rc) { | |
2576 | DP_ERR(cdev, "Failed to enable VF[%d]\n", i); | |
2577 | qed_ptt_release(hwfn, ptt); | |
2578 | goto err; | |
2579 | } | |
2580 | } | |
2581 | ||
2582 | qed_ptt_release(hwfn, ptt); | |
2583 | } | |
2584 | ||
2585 | /* Enable SRIOV PCIe functions */ | |
2586 | rc = pci_enable_sriov(cdev->pdev, num); | |
2587 | if (rc) { | |
2588 | DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); | |
2589 | goto err; | |
2590 | } | |
2591 | ||
2592 | return num; | |
2593 | ||
2594 | err: | |
2595 | qed_sriov_disable(cdev, false); | |
2596 | return rc; | |
2597 | } | |
2598 | ||
2599 | static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) | |
2600 | { | |
2601 | if (!IS_QED_SRIOV(cdev)) { | |
2602 | DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); | |
2603 | return -EOPNOTSUPP; | |
2604 | } | |
2605 | ||
2606 | if (num_vfs_param) | |
2607 | return qed_sriov_enable(cdev, num_vfs_param); | |
2608 | else | |
2609 | return qed_sriov_disable(cdev, true); | |
2610 | } | |
2611 | ||
36558c3d YM |
2612 | void qed_inform_vf_link_state(struct qed_hwfn *hwfn) |
2613 | { | |
2614 | struct qed_mcp_link_capabilities caps; | |
2615 | struct qed_mcp_link_params params; | |
2616 | struct qed_mcp_link_state link; | |
2617 | int i; | |
2618 | ||
2619 | if (!hwfn->pf_iov_info) | |
2620 | return; | |
2621 | ||
2622 | /* Update bulletin of all future possible VFs with link configuration */ | |
2623 | for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { | |
2624 | memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); | |
2625 | memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); | |
2626 | memcpy(&caps, qed_mcp_get_link_capabilities(hwfn), | |
2627 | sizeof(caps)); | |
2628 | ||
2629 | qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); | |
2630 | } | |
2631 | ||
2632 | qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); | |
2633 | } | |
2634 | ||
37bff2b9 YM |
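/* Work-item half of the mailbox flow: atomically fetch-and-clear the
 * pending-events bitmap, then for every marked VF copy its request into the
 * PF buffer and process it. If no PTT is available, the work is simply
 * rescheduled.
 */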
2635 | static void qed_handle_vf_msg(struct qed_hwfn *hwfn) |
2636 | { | |
2637 | u64 events[QED_VF_ARRAY_LENGTH]; | |
2638 | struct qed_ptt *ptt; | |
2639 | int i; | |
2640 | ||
2641 | ptt = qed_ptt_acquire(hwfn); | |
2642 | if (!ptt) { | |
2643 | DP_VERBOSE(hwfn, QED_MSG_IOV, | |
2644 | "Can't acquire PTT; re-scheduling\n"); | |
2645 | qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); | |
2646 | return; | |
2647 | } | |
2648 | ||
2649 | qed_iov_pf_get_and_clear_pending_events(hwfn, events); | |
2650 | ||
2651 | DP_VERBOSE(hwfn, QED_MSG_IOV, | |
2652 | "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", | |
2653 | events[0], events[1], events[2]); | |
2654 | ||
2655 | qed_for_each_vf(hwfn, i) { | |
2656 | /* Skip VFs with no pending messages */ | |
2657 | if (!(events[i / 64] & (1ULL << (i % 64)))) | |
2658 | continue; | |
2659 | ||
2660 | DP_VERBOSE(hwfn, QED_MSG_IOV, | |
2661 | "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", | |
2662 | i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); | |
2663 | ||
2664 | /* Copy VF's message to PF's request buffer for that VF */ | |
2665 | if (qed_iov_copy_vf_msg(hwfn, ptt, i)) | |
2666 | continue; | |
2667 | ||
2668 | qed_iov_process_mbx_req(hwfn, ptt, i); | |
2669 | } | |
2670 | ||
2671 | qed_ptt_release(hwfn, ptt); | |
2672 | } | |
2673 | ||
36558c3d YM |
2674 | static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) |
2675 | { | |
2676 | struct qed_ptt *ptt; | |
2677 | int i; | |
2678 | ||
2679 | ptt = qed_ptt_acquire(hwfn); | |
2680 | if (!ptt) { | |
2681 | DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); | |
2682 | qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); | |
2683 | return; | |
2684 | } | |
2685 | ||
2686 | qed_for_each_vf(hwfn, i) | |
2687 | qed_iov_post_vf_bulletin(hwfn, i, ptt); | |
2688 | ||
2689 | qed_ptt_release(hwfn, ptt); | |
2690 | } | |
2691 | ||
37bff2b9 YM |
2692 | void qed_iov_pf_task(struct work_struct *work) |
2693 | { | |
2694 | struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, | |
2695 | iov_task.work); | |
0b55e27d | 2696 | int rc; |
37bff2b9 YM |
2697 | |
2698 | if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) | |
2699 | return; | |
2700 | ||
0b55e27d YM |
2701 | if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { |
2702 | struct qed_ptt *ptt = qed_ptt_acquire(hwfn); | |
2703 | ||
2704 | if (!ptt) { | |
2705 | qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); | |
2706 | return; | |
2707 | } | |
2708 | ||
2709 | rc = qed_iov_vf_flr_cleanup(hwfn, ptt); | |
2710 | if (rc) | |
2711 | qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); | |
2712 | ||
2713 | qed_ptt_release(hwfn, ptt); | |
2714 | } | |
2715 | ||
37bff2b9 YM |
2716 | if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) |
2717 | qed_handle_vf_msg(hwfn); | |
36558c3d YM |
2718 | if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, |
2719 | &hwfn->iov_task_flags)) | |
2720 | qed_handle_bulletin_post(hwfn); | |
37bff2b9 YM |
2721 | } |
2722 | ||
2723 | void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) | |
2724 | { | |
2725 | int i; | |
2726 | ||
2727 | for_each_hwfn(cdev, i) { | |
2728 | if (!cdev->hwfns[i].iov_wq) | |
2729 | continue; | |
2730 | ||
2731 | if (schedule_first) { | |
2732 | qed_schedule_iov(&cdev->hwfns[i], | |
2733 | QED_IOV_WQ_STOP_WQ_FLAG); | |
2734 | cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); | |
2735 | } | |
2736 | ||
2737 | flush_workqueue(cdev->hwfns[i].iov_wq); | |
2738 | destroy_workqueue(cdev->hwfns[i].iov_wq); | |
2739 | } | |
2740 | } | |
2741 | ||
2742 | int qed_iov_wq_start(struct qed_dev *cdev) | |
2743 | { | |
2744 | char name[NAME_SIZE]; | |
2745 | int i; | |
2746 | ||
2747 | for_each_hwfn(cdev, i) { | |
2748 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | |
2749 | ||
36558c3d YM |
2750 | /* PFs need a dedicated workqueue only if they support IOV. | |
2751 | * VFs always require one. | |
2752 | */ | |
2753 | if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) | |
37bff2b9 YM |
2754 | continue; |
2755 | ||
2756 | snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", | |
2757 | cdev->pdev->bus->number, | |
2758 | PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); | |
2759 | ||
2760 | p_hwfn->iov_wq = create_singlethread_workqueue(name); | |
2761 | if (!p_hwfn->iov_wq) { | |
2762 | DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); | |
2763 | return -ENOMEM; | |
2764 | } | |
2765 | ||
36558c3d YM |
2766 | if (IS_PF(cdev)) |
2767 | INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); | |
2768 | else | |
2769 | INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); | |
37bff2b9 YM |
2770 | } |
2771 | ||
2772 | return 0; | |
2773 | } | |
0b55e27d YM |
2774 | |
2775 | const struct qed_iov_hv_ops qed_iov_ops_pass = { | |
2776 | .configure = &qed_sriov_configure, | |
2777 | }; |