// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
	/* vf_id range is only valid for 0-255, and should always be unsigned */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
	switch (ice_err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_num = 0;
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_msix_per_vf - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Avoid wait time by stopping all VFs at the same time */
	ice_for_each_vf(pf, i)
		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
			ice_dis_vf_qs(&pf->vf[i]);

	tmp = pf->num_alloc_vfs;
	pf->num_qps_per_vf = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(&pf->vf[i]);
			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		unsigned int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

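/* Note on the GLGEN_VFLRSTAT math used above and in ice_trigger_vf_reset()
 * and ice_process_vflr_event() below: each GLGEN_VFLRSTAT register tracks 32
 * VFs, so a VF's ack bit lives at register index (vf_base_id + vf_id) / 32,
 * bit (vf_base_id + vf_id) % 32. For example (hypothetical values), a
 * vf_base_id of 64 and vf_id of 5 give absolute ID 69, which maps to
 * GLGEN_VFLRSTAT(2), bit 5.
 */
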
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in ice_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
	if (!is_pfr)
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true to enable the PVID, false to disable it
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_aqc_vsi_props *info;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	info = &ctxt->info;
	if (enable) {
		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
			ICE_AQ_VSI_PVLAN_INSERT_PVID |
			ICE_AQ_VSI_VLAN_EMOD_STR;
		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
			ICE_AQ_VSI_VLAN_MODE_ALL;
		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	info->pvid = cpu_to_le16(pvid_info);
	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
					   ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
			 ice_stat_str(status),
			 ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = info->vlan_flags;
	vsi->info.sw_flags2 = info->sw_flags2;
	vsi->info.pvid = info->pvid;
out:
	kfree(ctxt);
	return ret;
}

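/* A note on the pvid_info layout (an assumption, based on the standard
 * 802.1Q TCI format and the VLAN_VID_MASK usage in
 * ice_vf_rebuild_host_vlan_cfg() below): bits 0-11 carry the VLAN ID and the
 * QoS/priority value sits in bits 13-15, i.e. shifted by VLAN_PRIO_SHIFT,
 * which is why masking with VLAN_VID_MASK recovers just the VLAN ID.
 */
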
/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @vf_id: defines VF ID to which this VSI connects.
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *
ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}

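/* Worked example (hypothetical values): with sriov_base_vector 96 and
 * num_msix_per_vf 17, VF 0 owns PF-space vectors 96-112 and VF 2 starts at
 * 96 + 2 * 17 = 130. Vector 0 of each block is the VF's OICR; the queue
 * vectors follow it, which is why ice_calc_vf_reg_idx() later in this file
 * adds 1.
 */
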
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add VLAN filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	u16 vlan_id = 0;
	int err;

	if (vf->port_vlan_info) {
		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
	}

	/* vlan_id will either be 0 or the port VLAN number */
	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
			err);
		return err;
	}

	return 0;
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum ice_status status;
	u8 broadcast[ETH_ALEN];

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
			vf->vf_id, ice_stat_str(status));
		return ice_status_to_errno(status);
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
				&vf->dflt_lan_addr.addr[0], vf->vf_id,
				ice_stat_str(status));
			return ice_status_to_errno(status);
		}
		vf->num_mac++;
	}

	return 0;
}

/**
 * ice_alloc_vsi_res - Setup VF VSI and its resources
 * @vf: pointer to the VF structure
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_alloc_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int ret;

	dev = ice_pf_to_dev(pf);
	/* first vector index is the VFs OICR index */
	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	ret = ice_vf_rebuild_host_vlan_cfg(vf);
	if (ret) {
		dev_err(dev, "failed to rebuild default VLAN configuration for VF %d, error %d\n",
			vf->vf_id, ret);
		goto ice_alloc_vsi_res_exit;
	}

	ret = ice_vf_rebuild_host_mac_cfg(vf);
	if (ret) {
		dev_err(dev, "failed to rebuild default MAC configuration for VF %d, error %d\n",
			vf->vf_id, ret);
		goto ice_alloc_vsi_res_exit;
	}

	/* Clear this bit after VF initialization since we shouldn't reclaim
	 * and reassign interrupts for synchronous or asynchronous VFR events.
	 * We don't want to reconfigure interrupts since AVF driver doesn't
	 * expect vector assignment to be changed unless there is a request for
	 * more vectors.
	 */
ice_alloc_vsi_res_exit:
	return ret;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_alloc_vf_res - Allocate VF resources
 * @vf: pointer to the VF structure
 */
static int ice_alloc_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int tx_rx_queue_left;
	int status;

	/* Update number of VF queues, in case VF had requested for queue
	 * changes
	 */
	tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	tx_rx_queue_left += pf->num_qps_per_vf;
	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
	    vf->num_req_qs != vf->num_vf_qs)
		vf->num_vf_qs = vf->num_req_qs;

	/* setup VF VSI and necessary resources */
	status = ice_alloc_vsi_res(vf);
	if (status)
		goto ice_alloc_vf_res_exit;

	ice_vf_set_host_trust_cfg(vf);

	/* VF is now completely initialized */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);

	return status;

ice_alloc_vf_res_exit:
	ice_free_vf_res(vf);
	return status;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->num_msix_per_vf) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

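/* Example of the two index spaces used above (hypothetical values): with a
 * first_vector_idx of 96 and an msix_vector_first_id of 2048, the VPINT_ALLOC
 * registers are programmed with device-based vectors starting at 2144, while
 * the GLINT_VECT2FUNC loop walks the PF-based range starting at 96.
 */
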
/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns a non-zero value if resources (queues/vectors) are available, or
 * returns zero if the PF cannot accommodate all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* start by checking if PF can assign max number of resources for
	 * all num_alloc_vfs.
	 * if yes, return number per VF
	 * If no, divide by 2 and roundup, check again
	 * repeat the loop till we reach a point where even minimum resources
	 * are not available, in that case return 0
	 */
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}

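/* Worked example of the halving loop above (hypothetical values): with 8 VFs,
 * max_res 16, min_res 1 and avail_res 52, the loop tries 16 * 8 = 128, then
 * 8 * 8 = 64, then 4 * 8 = 32, which fits, so each VF gets 4. Only when even
 * min_res * num_alloc_vfs exceeds avail_res does the function return 0.
 */
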
/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
	       q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

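/* Resulting MSI-X layout in the PF's space after the call above:
 *
 *   [0 .. vectors_used - 1][.. unused ..][sriov_base_vector .. total - 1]
 *    irq_tracker (PF/SW)                  SR-IOV VFs
 *
 * Carving the VF vectors from the top end is what keeps the irq_tracker
 * allocations untouched.
 */
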
/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	u16 num_msix_per_vf, num_txq, num_rxq;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			pf->num_alloc_vfs);
		return -EIO;
	}

	/* determine queue resources per VF */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
		return -EIO;
	}

	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
			pf->num_alloc_vfs);
		return -EINVAL;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
	pf->num_msix_per_vf = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

	return 0;
}

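/* Example of the tier selection above (hypothetical values): with 130 MSI-X
 * vectors left over for SR-IOV and 8 VFs, msix_avail_per_vf is 16, which
 * falls short of the medium tier (ICE_NUM_VF_MSIX_MED, 17 vectors), so each
 * VF gets the small tier of 5 vectors and, per the tiers listed in the
 * kernel-doc above, at most 4 data queue pairs.
 */
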
/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed. Reallocate VF resources back to make
 * VF state active
 */
static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;

	hw = &pf->hw;

	/* Allow HW to access VF memory after calling
	 * ice_clear_vf_reset_trigger(). If we did it any sooner, HW could
	 * access memory while it was being freed in ice_free_vf_res(), causing
	 * an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	ice_clear_vf_reset_trigger(vf);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!ice_alloc_vf_res(vf)) {
		ice_ena_vf_mappings(vf);
		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
		       bool rm_promisc)
{
	struct ice_pf *pf = vf->pf;
	enum ice_status status = 0;
	struct ice_hw *hw;

	hw = &pf->hw;
	if (vsi->num_vlan) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  rm_promisc);
	} else if (vf->port_vlan_info) {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       vf->port_vlan_info);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     vf->port_vlan_info);
	} else {
		if (rm_promisc)
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
		else
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
	}

	return status;
}

/**
 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
 * @pf: pointer to the PF structure
 *
 * This function is being called as last part of resetting all VFs, or when
 * configuring VFs for the first time, where there is no resource to be freed
 * Returns true if resources were properly allocated for all VFs, and false
 * otherwise.
 */
static bool ice_config_res_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int v;

	if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
		return false;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	/* Finish resetting each VF and allocate resources */
	ice_for_each_vf(pf, v) {
		struct ice_vf *vf = &pf->vf[v];

		vf->num_vf_qs = pf->num_qps_per_vf;
		dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
			vf->num_vf_qs);
		ice_cleanup_and_realloc_vf(vf);
	}

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	ice_for_each_vf(pf, v) {
		struct ice_vsi *vsi;

		vf = &pf->vf[v];
		vsi = pf->vsi[vf->lan_vsi_idx];
		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
			ice_dis_vf_qs(vf);
		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
				NULL, ICE_VF_RESET, vf->vf_id, NULL);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
				break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {
		vf = &pf->vf[v];

		ice_free_vf_res(vf);

		/* Free VF queues as well, and reallocate later.
		 * If a given VF has different number of queues
		 * configured, the request for update will come
		 * via mailbox communication.
		 */
		vf->num_vf_qs = 0;
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	if (!ice_config_res_vfs(pf))
		return false;

	return true;
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	/* If the PF has been disabled, there is no need resetting VF until
	 * PF is active again. Similarly, if the VF has been disabled, this
	 * means something else is resetting the VF, so we shouldn't continue.
	 * Otherwise, set disable VF state bit for actual reset, and continue.
	 */
	return (test_bit(__ICE_VF_DIS, pf->state) ||
		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or resets
 * are disabled and false otherwise.
 */
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_hw *hw;
	bool rsd = false;
	u8 promisc_m;
	u32 reg;
	int i;

	dev = ice_pf_to_dev(pf);

	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
			vf->vf_id);
		return true;
	}

	if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
			vf->vf_id);
		return true;
	}

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, is_vflr, false);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);

	hw = &pf->hw;
	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}

	/* Display a warning if the VF didn't manage to reset in time, but
	 * continue on with the operation.
	 */
	if (!rsd)
		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		if (vf->port_vlan_info || vsi->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		else
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(dev, "disabling promiscuous mode failed\n");
	}

	/* free VF resources to begin resetting the VSI state */
	ice_free_vf_res(vf);

	ice_cleanup_and_realloc_vf(vf);

	ice_flush(hw);

	return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i)
		ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!pf->num_alloc_vfs)
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct ice_pf *pf;

	if (!vf)
		return;

	pf = vf->pf;
	if (ice_validate_vf_id(pf, vf->vf_id))
		return;

	/* Bail out if the VF is in the disabled state or is neither in the
	 * initialized nor the active state; otherwise proceed with the
	 * notification
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		return;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
			      NULL);
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	u8 broadcast[ETH_ALEN];
	enum ice_status status;
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	dev = ice_pf_to_dev(pf);
	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
	if (err) {
		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
			 vf->vf_id);
		goto release_vsi;
	}

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
			vf->vf_id, ice_stat_str(status));
		err = ice_status_to_errno(status);
		goto release_vsi;
	}

	vf->num_mac = 1;

	return 0;

release_vsi:
	ice_vsi_release(vsi);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int retval, i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		ice_clear_vf_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
	}

	ice_flush(hw);
	return 0;

teardown:
	for (i = i - 1; i >= 0; i--) {
		struct ice_vf *vf = &pf->vf[i];

		ice_dis_vf_mappings(vf);
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
	}

	return retval;
}

/**
 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
 * @pf: PF holding reference to all VFs for default configuration
 */
static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		vf->pf = pf;
		vf->vf_id = i;
		vf->vf_sw_id = pf->first_sw;
		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
		vf->spoofchk = true;
		vf->num_vf_qs = pf->num_qps_per_vf;
	}
}

/**
 * ice_alloc_vfs - allocate num_vfs in the PF structure
 * @pf: PF to store the allocated VFs in
 * @num_vfs: number of VFs to allocate
 */
static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
{
	struct ice_vf *vfs;

	vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
			   GFP_KERNEL);
	if (!vfs)
		return -ENOMEM;

	pf->vf = vfs;
	pf->num_alloc_vfs = num_vfs;

	return 0;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}

	ret = ice_alloc_vfs(pf, num_vfs);
	if (ret)
		goto err_pci_disable_sriov;

	if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
			num_vfs);
		ret = -ENOSPC;
		goto err_unroll_sriov;
	}

	ice_set_dflt_settings_vfs(pf);

	if (ice_start_vfs(pf)) {
		dev_err(dev, "Failed to start VF(s)\n");
		ret = -EAGAIN;
		goto err_unroll_sriov;
	}

	clear_bit(__ICE_VF_DIS, pf->state);
	return 0;

err_unroll_sriov:
	devm_kfree(dev, pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state.
 * Returns false otherwise
 */
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

1696/**
1697 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 1698 * @pf: PF to enable SR-IOV on
1699 */
1700static int ice_check_sriov_allowed(struct ice_pf *pf)
1701{
1702 struct device *dev = ice_pf_to_dev(pf);
1703
1704 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1705 dev_err(dev, "This device is not capable of SR-IOV\n");
1706 return -EOPNOTSUPP;
1707 }
1708
1709 if (ice_is_safe_mode(pf)) {
1710 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1711 return -EOPNOTSUPP;
1712 }
1713
1714 if (!ice_pf_state_is_nominal(pf)) {
1715 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1716 return -EBUSY;
1717 }
1718
1719 return 0;
ddf30f7f
AV
1720}
1721
1722/**
1723 * ice_sriov_configure - Enable or change number of VFs via sysfs
1724 * @pdev: pointer to a pci_dev structure
02337f1f 1725 * @num_vfs: number of VFs to allocate or 0 to free VFs
ddf30f7f 1726 *
02337f1f
BC
1727 * This function is called when the user updates the number of VFs in sysfs. On
1728 * success return whatever num_vfs was set to by the caller. Return negative on
1729 * failure.
ddf30f7f
AV
1730 */
1731int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1732{
1733 struct ice_pf *pf = pci_get_drvdata(pdev);
4015d11e 1734 struct device *dev = ice_pf_to_dev(pf);
02337f1f 1735 int err;
ddf30f7f 1736
02337f1f
BC
1737 err = ice_check_sriov_allowed(pf);
1738 if (err)
1739 return err;
462acf6a 1740
02337f1f
BC
1741 if (!num_vfs) {
1742 if (!pci_vfs_assigned(pdev)) {
1743 ice_free_vfs(pf);
1744 return 0;
1745 }
ddf30f7f 1746
4015d11e 1747 dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
ddf30f7f
AV
1748 return -EBUSY;
1749 }
1750
02337f1f
BC
1751 err = ice_pci_sriov_ena(pf, num_vfs);
1752 if (err)
1753 return err;
1754
1755 return num_vfs;
ddf30f7f 1756}
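/* Example (illustrative, not part of the source): this callback is reached
 * through the standard SR-IOV sysfs attribute. To create four VFs on the PF:
 *
 *	echo 4 > /sys/class/net/<pf_ifname>/device/sriov_numvfs
 *
 * and to free them again (only possible while none are assigned to VMs):
 *
 *	echo 0 > /sys/class/net/<pf_ifname>/device/sriov_numvfs
 */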
007676b4
AV
1757
1758/**
1759 * ice_process_vflr_event - Free VF resources via IRQ calls
1760 * @pf: pointer to the PF structure
1761 *
df17b7e0 1762 * called from the VFLR IRQ handler to
007676b4
AV
1763 * free up VF resources and state variables
1764 */
1765void ice_process_vflr_event(struct ice_pf *pf)
1766{
1767 struct ice_hw *hw = &pf->hw;
53bb6698 1768 unsigned int vf_id;
007676b4
AV
1769 u32 reg;
1770
8d7189d2 1771 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
007676b4
AV
1772 !pf->num_alloc_vfs)
1773 return;
1774
005881bc 1775 ice_for_each_vf(pf, vf_id) {
007676b4
AV
1776 struct ice_vf *vf = &pf->vf[vf_id];
1777 u32 reg_idx, bit_idx;
1778
1779 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1780 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1781 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
1782 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1783 if (reg & BIT(bit_idx))
1784 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1785 ice_reset_vf(vf, true);
1786 }
1787}
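/* Worked example (illustrative): with hw->func_caps.vf_base_id = 64 and
 * vf_id = 5, the absolute VF index is 69, so reg_idx = 69 / 32 = 2 and
 * bit_idx = 69 % 32 = 5, i.e. bit 5 of GLGEN_VFLRSTAT(2) reports the VFLR
 * for this VF.
 */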
7c710869
AV
1788
1789/**
ff010eca 1790 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
7c710869 1791 * @vf: pointer to the VF info
7c710869 1792 */
ff010eca 1793static void ice_vc_reset_vf(struct ice_vf *vf)
7c710869
AV
1794{
1795 ice_vc_notify_vf_reset(vf);
1796 ice_reset_vf(vf, false);
1797}
1798
2309ae38
BC
1799/**
1800 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1801 * @pf: PF used to index all VFs
1802 * @pfq: queue index relative to the PF's function space
1803 *
 1804 * If no VF owns the pfq then return NULL, otherwise return a
 1805 * pointer to the VF that owns the pfq
1806 */
1807static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1808{
53bb6698 1809 unsigned int vf_id;
2309ae38
BC
1810
1811 ice_for_each_vf(pf, vf_id) {
1812 struct ice_vf *vf = &pf->vf[vf_id];
1813 struct ice_vsi *vsi;
1814 u16 rxq_idx;
1815
1816 vsi = pf->vsi[vf->lan_vsi_idx];
1817
1818 ice_for_each_rxq(vsi, rxq_idx)
1819 if (vsi->rxq_map[rxq_idx] == pfq)
1820 return vf;
1821 }
1822
1823 return NULL;
1824}
1825
1826/**
1827 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1828 * @pf: PF used for conversion
1829 * @globalq: global queue index used to convert to PF space queue index
1830 */
1831static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1832{
1833 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1834}
1835
1836/**
1837 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1838 * @pf: PF that the LAN overflow event happened on
1839 * @event: structure holding the event information for the LAN overflow event
1840 *
1841 * Determine if the LAN overflow event was caused by a VF queue. If it was not
1842 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1843 * reset on the offending VF.
1844 */
1845void
1846ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1847{
1848 u32 gldcb_rtctq, queue;
1849 struct ice_vf *vf;
1850
1851 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1852 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1853
1854 /* event returns device global Rx queue number */
1855 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1856 GLDCB_RTCTQ_RXQNUM_S;
1857
1858 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1859 if (!vf)
1860 return;
1861
1862 ice_vc_reset_vf(vf);
1863}
1864
1071a835
AV
1865/**
1866 * ice_vc_send_msg_to_vf - Send message to VF
1867 * @vf: pointer to the VF info
1868 * @v_opcode: virtual channel opcode
1869 * @v_retval: virtual channel return value
1870 * @msg: pointer to the msg buffer
1871 * @msglen: msg length
1872 *
1873 * send msg to VF
1874 */
c8b7abdd 1875static int
cf6c6e01
MW
1876ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1877 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1071a835
AV
1878{
1879 enum ice_status aq_ret;
4015d11e 1880 struct device *dev;
1071a835
AV
1881 struct ice_pf *pf;
1882
4c66d227 1883 if (!vf)
1071a835
AV
1884 return -EINVAL;
1885
1886 pf = vf->pf;
4c66d227
JB
1887 if (ice_validate_vf_id(pf, vf->vf_id))
1888 return -EINVAL;
1071a835 1889
4015d11e
BC
1890 dev = ice_pf_to_dev(pf);
1891
1071a835
AV
1892 /* single place to detect unsuccessful return values */
1893 if (v_retval) {
1894 vf->num_inval_msgs++;
4015d11e
BC
1895 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1896 v_opcode, v_retval);
1071a835 1897 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
19cce2c6 1898 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1071a835 1899 vf->vf_id);
4015d11e 1900 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1071a835
AV
1901 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1902 return -EIO;
1903 }
1904 } else {
1905 vf->num_valid_msgs++;
1906 /* reset the invalid counter, if a valid message is received. */
1907 vf->num_inval_msgs = 0;
1908 }
1909
1910 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1911 msg, msglen, NULL);
90e47737 1912 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
0fee3577
LY
1913 dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1914 vf->vf_id, ice_stat_str(aq_ret),
1915 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1071a835
AV
1916 return -EIO;
1917 }
1918
1919 return 0;
1920}
1921
1922/**
1923 * ice_vc_get_ver_msg
1924 * @vf: pointer to the VF info
1925 * @msg: pointer to the msg buffer
1926 *
1927 * called from the VF to request the API version used by the PF
1928 */
1929static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1930{
1931 struct virtchnl_version_info info = {
1932 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1933 };
1934
1935 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1936 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1937 if (VF_IS_V10(&vf->vf_ver))
1938 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1939
cf6c6e01
MW
1940 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1941 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1071a835
AV
1942 sizeof(struct virtchnl_version_info));
1943}
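/* Example exchange (illustrative): a VF that advertises API 1.0 in msg has
 * the minor version clamped to VIRTCHNL_VERSION_MINOR_NO_VF_CAPS in the
 * reply, while a 1.1 VF receives the PF's full VIRTCHNL_VERSION_MAJOR/MINOR
 * pair.
 */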
1944
1945/**
1946 * ice_vc_get_vf_res_msg
1947 * @vf: pointer to the VF info
1948 * @msg: pointer to the msg buffer
1949 *
1950 * called from the VF to request its resources
1951 */
1952static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1953{
cf6c6e01 1954 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835 1955 struct virtchnl_vf_resource *vfres = NULL;
1071a835
AV
1956 struct ice_pf *pf = vf->pf;
1957 struct ice_vsi *vsi;
1958 int len = 0;
1959 int ret;
1960
4c66d227 1961 if (ice_check_vf_init(pf, vf)) {
cf6c6e01 1962 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
1963 goto err;
1964 }
1965
1966 len = sizeof(struct virtchnl_vf_resource);
1967
9efe35d0 1968 vfres = kzalloc(len, GFP_KERNEL);
1071a835 1969 if (!vfres) {
cf6c6e01 1970 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1071a835
AV
1971 len = 0;
1972 goto err;
1973 }
1974 if (VF_IS_V11(&vf->vf_ver))
1975 vf->driver_caps = *(u32 *)msg;
1976 else
1977 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1978 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1979 VIRTCHNL_VF_OFFLOAD_VLAN;
1980
1981 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1982 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 1983 if (!vsi) {
cf6c6e01 1984 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
1985 goto err;
1986 }
1987
1071a835
AV
1988 if (!vsi->info.pvid)
1989 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1990
1991 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1992 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1993 } else {
1994 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1995 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1996 else
1997 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1998 }
1999
2000 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2001 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2002
2003 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2004 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2005
2006 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
2007 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2008
2009 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
2010 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2011
2012 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2013 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2014
2015 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2016 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2017
2018 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
2019 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2020
2021 vfres->num_vsis = 1;
 2022 /* Tx and Rx queue counts are equal for the VF */
2023 vfres->num_queue_pairs = vsi->num_txq;
46c276ce 2024 vfres->max_vectors = pf->num_msix_per_vf;
1071a835
AV
2025 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
2026 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
2027
2028 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
2029 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2030 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
2031 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2032 vf->dflt_lan_addr.addr);
2033
d4bc4e2d
BC
2034 /* match guest capabilities */
2035 vf->driver_caps = vfres->vf_cap_flags;
2036
1071a835
AV
2037 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
2038
2039err:
2040 /* send the response back to the VF */
cf6c6e01 2041 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1071a835
AV
2042 (u8 *)vfres, len);
2043
9efe35d0 2044 kfree(vfres);
1071a835
AV
2045 return ret;
2046}
2047
2048/**
2049 * ice_vc_reset_vf_msg
2050 * @vf: pointer to the VF info
2051 *
 2052 * called from the VF to reset itself;
 2053 * unlike other virtchnl messages, the PF driver
 2054 * doesn't send a response back to the VF
2055 */
2056static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2057{
2058 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
2059 ice_reset_vf(vf, false);
2060}
2061
2062/**
2063 * ice_find_vsi_from_id
2f2da36e 2064 * @pf: the PF structure to search for the VSI
f9867df6 2065 * @id: ID of the VSI it is searching for
1071a835 2066 *
f9867df6 2067 * searches for the VSI with the given ID
1071a835
AV
2068 */
2069static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2070{
2071 int i;
2072
80ed404a 2073 ice_for_each_vsi(pf, i)
1071a835
AV
2074 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2075 return pf->vsi[i];
2076
2077 return NULL;
2078}
2079
2080/**
2081 * ice_vc_isvalid_vsi_id
2082 * @vf: pointer to the VF info
f9867df6 2083 * @vsi_id: VF relative VSI ID
1071a835 2084 *
f9867df6 2085 * check for the valid VSI ID
1071a835
AV
2086 */
2087static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2088{
2089 struct ice_pf *pf = vf->pf;
2090 struct ice_vsi *vsi;
2091
2092 vsi = ice_find_vsi_from_id(pf, vsi_id);
2093
2094 return (vsi && (vsi->vf_id == vf->vf_id));
2095}
2096
2097/**
2098 * ice_vc_isvalid_q_id
2099 * @vf: pointer to the VF info
f9867df6
AV
2100 * @vsi_id: VSI ID
2101 * @qid: VSI relative queue ID
1071a835 2102 *
f9867df6 2103 * check for the valid queue ID
1071a835
AV
2104 */
2105static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2106{
2107 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
 2108 /* allocated Tx and Rx queues should always be equal for VF VSI */
2109 return (vsi && (qid < vsi->alloc_txq));
2110}
2111
9c7dd756
MS
2112/**
2113 * ice_vc_isvalid_ring_len
2114 * @ring_len: length of ring
2115 *
 2116 * check for a valid ring count, which should be a multiple of ICE_REQ_DESC_MULTIPLE
77ca27c4 2117 * or zero
9c7dd756
MS
2118 */
2119static bool ice_vc_isvalid_ring_len(u16 ring_len)
2120{
77ca27c4
PG
2121 return ring_len == 0 ||
2122 (ring_len >= ICE_MIN_NUM_DESC &&
9c7dd756
MS
2123 ring_len <= ICE_MAX_NUM_DESC &&
2124 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2125}
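/* Worked example (illustrative, assuming this driver generation's limits of
 * ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC = 8160 and ICE_REQ_DESC_MULTIPLE =
 * 32): a requested ring length of 512 passes, 8192 fails the maximum check,
 * and 500 fails for not being a multiple of 32.
 */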
2126
1071a835
AV
2127/**
2128 * ice_vc_config_rss_key
2129 * @vf: pointer to the VF info
2130 * @msg: pointer to the msg buffer
2131 *
2132 * Configure the VF's RSS key
2133 */
2134static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2135{
cf6c6e01 2136 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2137 struct virtchnl_rss_key *vrk =
2138 (struct virtchnl_rss_key *)msg;
f1ef73f5 2139 struct ice_pf *pf = vf->pf;
4c66d227 2140 struct ice_vsi *vsi;
1071a835
AV
2141
2142 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2143 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2144 goto error_param;
2145 }
2146
2147 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
cf6c6e01 2148 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2149 goto error_param;
2150 }
2151
3f416961 2152 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
cf6c6e01 2153 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2154 goto error_param;
2155 }
2156
3f416961 2157 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2158 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2159 goto error_param;
2160 }
2161
3f416961
A
2162 vsi = pf->vsi[vf->lan_vsi_idx];
2163 if (!vsi) {
cf6c6e01 2164 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2165 goto error_param;
2166 }
2167
cf6c6e01
MW
2168 if (ice_set_rss(vsi, vrk->key, NULL, 0))
2169 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2170error_param:
cf6c6e01 2171 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1071a835
AV
2172 NULL, 0);
2173}
2174
2175/**
2176 * ice_vc_config_rss_lut
2177 * @vf: pointer to the VF info
2178 * @msg: pointer to the msg buffer
2179 *
2180 * Configure the VF's RSS LUT
2181 */
2182static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2183{
2184 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
cf6c6e01 2185 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
f1ef73f5 2186 struct ice_pf *pf = vf->pf;
4c66d227 2187 struct ice_vsi *vsi;
1071a835
AV
2188
2189 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2190 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2191 goto error_param;
2192 }
2193
2194 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
cf6c6e01 2195 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2196 goto error_param;
2197 }
2198
3f416961 2199 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
cf6c6e01 2200 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2201 goto error_param;
2202 }
2203
3f416961 2204 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
cf6c6e01 2205 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2206 goto error_param;
2207 }
2208
3f416961
A
2209 vsi = pf->vsi[vf->lan_vsi_idx];
2210 if (!vsi) {
cf6c6e01 2211 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2212 goto error_param;
2213 }
2214
cf6c6e01
MW
2215 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2216 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835 2217error_param:
cf6c6e01 2218 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1071a835
AV
2219 NULL, 0);
2220}
2221
c54d209c
BC
2222/**
2223 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 2224 * @vf: The VF being reset
2225 *
 2226 * The max poll time is ~800 ms, which is roughly the maximum time it takes
 2227 * for a VF to be reset and/or a VF driver to be removed.
2228 */
2229static void ice_wait_on_vf_reset(struct ice_vf *vf)
2230{
2231 int i;
2232
2233 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2234 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2235 break;
2236 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2237 }
2238}
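/* Note (illustrative): the ~800 ms bound above is simply the loop budget,
 * ICE_MAX_VF_RESET_TRIES * ICE_MAX_VF_RESET_SLEEP_MS, assuming this driver
 * generation's values of 40 tries at 20 ms each.
 */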
2239
2240/**
2241 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2242 * @vf: VF to check if it's ready to be configured/queried
2243 *
2244 * The purpose of this function is to make sure the VF is not in reset, not
2245 * disabled, and initialized so it can be configured and/or queried by a host
2246 * administrator.
2247 */
2248static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2249{
2250 struct ice_pf *pf;
2251
2252 ice_wait_on_vf_reset(vf);
2253
2254 if (ice_is_vf_disabled(vf))
2255 return -EINVAL;
2256
2257 pf = vf->pf;
2258 if (ice_check_vf_init(pf, vf))
2259 return -EBUSY;
2260
2261 return 0;
2262}
2263
cd6d6b83
BC
2264/**
2265 * ice_set_vf_spoofchk
2266 * @netdev: network interface device structure
2267 * @vf_id: VF identifier
2268 * @ena: flag to enable or disable feature
2269 *
2270 * Enable or disable VF spoof checking
2271 */
2272int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2273{
2274 struct ice_netdev_priv *np = netdev_priv(netdev);
2275 struct ice_pf *pf = np->vsi->back;
2276 struct ice_vsi_ctx *ctx;
2277 struct ice_vsi *vf_vsi;
2278 enum ice_status status;
2279 struct device *dev;
2280 struct ice_vf *vf;
c54d209c 2281 int ret;
cd6d6b83
BC
2282
2283 dev = ice_pf_to_dev(pf);
2284 if (ice_validate_vf_id(pf, vf_id))
2285 return -EINVAL;
2286
2287 vf = &pf->vf[vf_id];
c54d209c
BC
2288 ret = ice_check_vf_ready_for_cfg(vf);
2289 if (ret)
2290 return ret;
cd6d6b83
BC
2291
2292 vf_vsi = pf->vsi[vf->lan_vsi_idx];
2293 if (!vf_vsi) {
2294 netdev_err(netdev, "VSI %d for VF %d is null\n",
2295 vf->lan_vsi_idx, vf->vf_id);
2296 return -EINVAL;
2297 }
2298
2299 if (vf_vsi->type != ICE_VSI_VF) {
 19cce2c6 2300 netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
cd6d6b83
BC
2301 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2302 return -ENODEV;
2303 }
2304
2305 if (ena == vf->spoofchk) {
2306 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2307 return 0;
2308 }
2309
2310 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2311 if (!ctx)
2312 return -ENOMEM;
2313
2314 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2315 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2316 if (ena) {
2317 ctx->info.sec_flags |=
2318 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2319 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2320 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2321 } else {
2322 ctx->info.sec_flags &=
2323 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2324 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2325 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2326 }
2327
2328 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2329 if (status) {
0fee3577
LY
 2330 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2331 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2332 ice_stat_str(status));
cd6d6b83
BC
2333 ret = -EIO;
2334 goto out;
2335 }
2336
2337 /* only update spoofchk state and VSI context on success */
2338 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2339 vf->spoofchk = ena;
2340
2341out:
2342 kfree(ctx);
2343 return ret;
2344}
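/* Example (illustrative, not part of the source): this ndo callback is
 * normally reached through iproute2, e.g.
 *
 *	ip link set <pf_ifname> vf 0 spoofchk on
 */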
2345
01b5e89a
BC
2346/**
2347 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2348 * @pf: PF structure for accessing VF(s)
2349 *
2350 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2351 * else return true
2352 */
2353bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2354{
2355 int vf_idx;
2356
2357 ice_for_each_vf(pf, vf_idx) {
2358 struct ice_vf *vf = &pf->vf[vf_idx];
2359
2360 /* found a VF that has promiscuous mode configured */
2361 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2362 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2363 return true;
2364 }
2365
2366 return false;
2367}
2368
2369/**
2370 * ice_vc_cfg_promiscuous_mode_msg
2371 * @vf: pointer to the VF info
2372 * @msg: pointer to the msg buffer
2373 *
2374 * called from the VF to configure VF VSIs promiscuous mode
2375 */
2376static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2377{
2378 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2379 struct virtchnl_promisc_info *info =
2380 (struct virtchnl_promisc_info *)msg;
2381 struct ice_pf *pf = vf->pf;
2382 struct ice_vsi *vsi;
2383 struct device *dev;
2384 bool rm_promisc;
2385 int ret = 0;
2386
2387 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2388 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2389 goto error_param;
2390 }
2391
2392 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2393 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2394 goto error_param;
2395 }
2396
2397 vsi = pf->vsi[vf->lan_vsi_idx];
2398 if (!vsi) {
2399 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2400 goto error_param;
2401 }
2402
2403 dev = ice_pf_to_dev(pf);
2404 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2405 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2406 vf->vf_id);
2407 /* Leave v_ret alone, lie to the VF on purpose. */
2408 goto error_param;
2409 }
2410
2411 rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2412 !(info->flags & FLAG_VF_MULTICAST_PROMISC);
2413
2414 if (vsi->num_vlan || vf->port_vlan_info) {
2415 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2416 struct net_device *pf_netdev;
2417
2418 if (!pf_vsi) {
2419 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2420 goto error_param;
2421 }
2422
2423 pf_netdev = pf_vsi->netdev;
2424
2425 ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2426 if (ret) {
2427 dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2428 rm_promisc ? "ON" : "OFF", vf->vf_id,
2429 vsi->vsi_num);
2430 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2431 }
2432
2433 ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2434 if (ret) {
2435 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2436 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2437 goto error_param;
2438 }
2439 }
2440
2441 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2442 bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2443
2444 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2445 /* only attempt to set the default forwarding VSI if
2446 * it's not currently set
2447 */
2448 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2449 else if (!set_dflt_vsi &&
2450 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2451 /* only attempt to free the default forwarding VSI if we
2452 * are the owner
2453 */
2454 ret = ice_clear_dflt_vsi(pf->first_sw);
2455
2456 if (ret) {
2457 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2458 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2459 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2460 goto error_param;
2461 }
2462 } else {
2463 enum ice_status status;
2464 u8 promisc_m;
2465
2466 if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2467 if (vf->port_vlan_info || vsi->num_vlan)
2468 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2469 else
2470 promisc_m = ICE_UCAST_PROMISC_BITS;
2471 } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2472 if (vf->port_vlan_info || vsi->num_vlan)
2473 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2474 else
2475 promisc_m = ICE_MCAST_PROMISC_BITS;
2476 } else {
2477 if (vf->port_vlan_info || vsi->num_vlan)
2478 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2479 else
2480 promisc_m = ICE_UCAST_PROMISC_BITS;
2481 }
2482
2483 /* Configure multicast/unicast with or without VLAN promiscuous
2484 * mode
2485 */
2486 status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2487 if (status) {
0fee3577
LY
2488 dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2489 rm_promisc ? "dis" : "en", vf->vf_id,
2490 ice_stat_str(status));
01b5e89a
BC
2491 v_ret = ice_err_to_virt_err(status);
2492 goto error_param;
2493 } else {
2494 dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2495 rm_promisc ? "dis" : "en", vf->vf_id);
2496 }
2497 }
2498
2499 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2500 set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2501 else
2502 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2503
2504 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2505 set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2506 else
2507 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2508
2509error_param:
2510 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2511 v_ret, NULL, 0);
2512}
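/* Example (illustrative, not part of the source): only a trusted VF passes
 * the privilege check above; the host grants that privilege via iproute2,
 * e.g.
 *
 *	ip link set <pf_ifname> vf 0 trust on
 */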
2513
1071a835
AV
2514/**
2515 * ice_vc_get_stats_msg
2516 * @vf: pointer to the VF info
2517 * @msg: pointer to the msg buffer
2518 *
2519 * called from the VF to get VSI stats
2520 */
2521static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2522{
cf6c6e01 2523 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2524 struct virtchnl_queue_select *vqs =
2525 (struct virtchnl_queue_select *)msg;
949375de 2526 struct ice_eth_stats stats = { 0 };
f1ef73f5 2527 struct ice_pf *pf = vf->pf;
1071a835
AV
2528 struct ice_vsi *vsi;
2529
2530 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2531 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2532 goto error_param;
2533 }
2534
2535 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2536 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2537 goto error_param;
2538 }
2539
f1ef73f5 2540 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2541 if (!vsi) {
cf6c6e01 2542 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2543 goto error_param;
2544 }
2545
1071a835
AV
2546 ice_update_eth_stats(vsi);
2547
2548 stats = vsi->eth_stats;
2549
2550error_param:
2551 /* send the response to the VF */
cf6c6e01 2552 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1071a835
AV
2553 (u8 *)&stats, sizeof(stats));
2554}
2555
24e2e2a0
BC
2556/**
2557 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2558 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2559 *
2560 * Return true on successful validation, else false
2561 */
2562static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2563{
2564 if ((!vqs->rx_queues && !vqs->tx_queues) ||
0ca469fb
MW
2565 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2566 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
24e2e2a0
BC
2567 return false;
2568
2569 return true;
2570}
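/* Worked example (illustrative, assuming ICE_MAX_RSS_QS_PER_VF = 16): a
 * valid selection is any non-zero bitmap below BIT(16), so rx_queues =
 * 0x0003 (queues 0 and 1) passes, while an all-zero request or rx_queues =
 * 0x10000 is rejected.
 */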
2571
4dc926d3
BC
2572/**
2573 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2574 * @vsi: VSI of the VF to configure
2575 * @q_idx: VF queue index used to determine the queue in the PF's space
2576 */
2577static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2578{
2579 struct ice_hw *hw = &vsi->back->hw;
2580 u32 pfq = vsi->txq_map[q_idx];
2581 u32 reg;
2582
2583 reg = rd32(hw, QINT_TQCTL(pfq));
2584
2585 /* MSI-X index 0 in the VF's space is always for the OICR, which means
2586 * this is most likely a poll mode VF driver, so don't enable an
2587 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2588 */
2589 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2590 return;
2591
2592 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2593}
2594
2595/**
 2596 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2597 * @vsi: VSI of the VF to configure
2598 * @q_idx: VF queue index used to determine the queue in the PF's space
2599 */
2600static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2601{
2602 struct ice_hw *hw = &vsi->back->hw;
2603 u32 pfq = vsi->rxq_map[q_idx];
2604 u32 reg;
2605
2606 reg = rd32(hw, QINT_RQCTL(pfq));
2607
2608 /* MSI-X index 0 in the VF's space is always for the OICR, which means
2609 * this is most likely a poll mode VF driver, so don't enable an
2610 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2611 */
2612 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2613 return;
2614
2615 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2616}
2617
1071a835
AV
2618/**
2619 * ice_vc_ena_qs_msg
2620 * @vf: pointer to the VF info
2621 * @msg: pointer to the msg buffer
2622 *
2623 * called from the VF to enable all or specific queue(s)
2624 */
2625static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2626{
cf6c6e01 2627 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2628 struct virtchnl_queue_select *vqs =
2629 (struct virtchnl_queue_select *)msg;
f1ef73f5 2630 struct ice_pf *pf = vf->pf;
1071a835 2631 struct ice_vsi *vsi;
77ca27c4
PG
2632 unsigned long q_map;
2633 u16 vf_q_id;
1071a835
AV
2634
2635 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2636 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2637 goto error_param;
2638 }
2639
2640 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2641 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2642 goto error_param;
2643 }
2644
24e2e2a0 2645 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3f416961
A
2646 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2647 goto error_param;
2648 }
2649
f1ef73f5 2650 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2651 if (!vsi) {
cf6c6e01 2652 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2653 goto error_param;
2654 }
2655
2656 /* Enable only Rx rings, Tx rings were enabled by the FW when the
2657 * Tx queue group list was configured and the context bits were
2658 * programmed using ice_vsi_cfg_txqs
2659 */
77ca27c4 2660 q_map = vqs->rx_queues;
0ca469fb 2661 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2662 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2663 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2664 goto error_param;
2665 }
2666
2667 /* Skip queue if enabled */
2668 if (test_bit(vf_q_id, vf->rxq_ena))
2669 continue;
2670
13a6233b 2671 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
19cce2c6 2672 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
77ca27c4
PG
2673 vf_q_id, vsi->vsi_num);
2674 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2675 goto error_param;
2676 }
2677
4dc926d3 2678 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
77ca27c4 2679 set_bit(vf_q_id, vf->rxq_ena);
77ca27c4
PG
2680 }
2681
2682 vsi = pf->vsi[vf->lan_vsi_idx];
2683 q_map = vqs->tx_queues;
0ca469fb 2684 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2685 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2686 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2687 goto error_param;
2688 }
2689
2690 /* Skip queue if enabled */
2691 if (test_bit(vf_q_id, vf->txq_ena))
2692 continue;
2693
4dc926d3 2694 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
77ca27c4 2695 set_bit(vf_q_id, vf->txq_ena);
77ca27c4 2696 }
1071a835
AV
2697
2698 /* Set flag to indicate that queues are enabled */
cf6c6e01 2699 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
77ca27c4 2700 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
2701
2702error_param:
2703 /* send the response to the VF */
cf6c6e01 2704 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1071a835
AV
2705 NULL, 0);
2706}
2707
2708/**
2709 * ice_vc_dis_qs_msg
2710 * @vf: pointer to the VF info
2711 * @msg: pointer to the msg buffer
2712 *
2713 * called from the VF to disable all or specific
2714 * queue(s)
2715 */
2716static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2717{
cf6c6e01 2718 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2719 struct virtchnl_queue_select *vqs =
2720 (struct virtchnl_queue_select *)msg;
f1ef73f5 2721 struct ice_pf *pf = vf->pf;
1071a835 2722 struct ice_vsi *vsi;
77ca27c4
PG
2723 unsigned long q_map;
2724 u16 vf_q_id;
1071a835
AV
2725
2726 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
77ca27c4 2727 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
cf6c6e01 2728 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2729 goto error_param;
2730 }
2731
2732 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
cf6c6e01 2733 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2734 goto error_param;
2735 }
2736
24e2e2a0 2737 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
cf6c6e01 2738 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2739 goto error_param;
2740 }
2741
f1ef73f5 2742 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 2743 if (!vsi) {
cf6c6e01 2744 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2745 goto error_param;
2746 }
2747
77ca27c4
PG
2748 if (vqs->tx_queues) {
2749 q_map = vqs->tx_queues;
2750
0ca469fb 2751 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2752 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2753 struct ice_txq_meta txq_meta = { 0 };
2754
2755 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2756 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2757 goto error_param;
2758 }
2759
2760 /* Skip queue if not enabled */
2761 if (!test_bit(vf_q_id, vf->txq_ena))
2762 continue;
2763
2764 ice_fill_txq_meta(vsi, ring, &txq_meta);
2765
2766 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2767 ring, &txq_meta)) {
19cce2c6 2768 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
77ca27c4
PG
2769 vf_q_id, vsi->vsi_num);
2770 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2771 goto error_param;
2772 }
2773
2774 /* Clear enabled queues flag */
2775 clear_bit(vf_q_id, vf->txq_ena);
77ca27c4 2776 }
1071a835
AV
2777 }
2778
e1fe6926
BC
2779 q_map = vqs->rx_queues;
2780 /* speed up Rx queue disable by batching them if possible */
2781 if (q_map &&
0ca469fb 2782 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
e1fe6926
BC
2783 if (ice_vsi_stop_all_rx_rings(vsi)) {
2784 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2785 vsi->vsi_num);
2786 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2787 goto error_param;
2788 }
77ca27c4 2789
0ca469fb 2790 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
e1fe6926 2791 } else if (q_map) {
0ca469fb 2792 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
77ca27c4
PG
2793 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2794 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2795 goto error_param;
2796 }
2797
2798 /* Skip queue if not enabled */
2799 if (!test_bit(vf_q_id, vf->rxq_ena))
2800 continue;
2801
13a6233b
BC
2802 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2803 true)) {
19cce2c6 2804 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
77ca27c4
PG
2805 vf_q_id, vsi->vsi_num);
2806 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2807 goto error_param;
2808 }
2809
2810 /* Clear enabled queues flag */
2811 clear_bit(vf_q_id, vf->rxq_ena);
77ca27c4 2812 }
1071a835
AV
2813 }
2814
2815 /* Clear enabled queues flag */
e1fe6926 2816 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
77ca27c4 2817 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1071a835
AV
2818
2819error_param:
2820 /* send the response to the VF */
cf6c6e01 2821 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1071a835
AV
2822 NULL, 0);
2823}
2824
0ca469fb
MW
2825/**
2826 * ice_cfg_interrupt
2827 * @vf: pointer to the VF info
2828 * @vsi: the VSI being configured
2829 * @vector_id: vector ID
2830 * @map: vector map for mapping vectors to queues
2831 * @q_vector: structure for interrupt vector
 2832 * Configure the IRQ-to-queue map
2833 */
2834static int
2835ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2836 struct virtchnl_vector_map *map,
2837 struct ice_q_vector *q_vector)
2838{
2839 u16 vsi_q_id, vsi_q_id_idx;
2840 unsigned long qmap;
2841
2842 q_vector->num_ring_rx = 0;
2843 q_vector->num_ring_tx = 0;
2844
2845 qmap = map->rxq_map;
2846 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2847 vsi_q_id = vsi_q_id_idx;
2848
2849 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2850 return VIRTCHNL_STATUS_ERR_PARAM;
2851
2852 q_vector->num_ring_rx++;
2853 q_vector->rx.itr_idx = map->rxitr_idx;
2854 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2855 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2856 q_vector->rx.itr_idx);
2857 }
2858
2859 qmap = map->txq_map;
2860 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2861 vsi_q_id = vsi_q_id_idx;
2862
2863 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2864 return VIRTCHNL_STATUS_ERR_PARAM;
2865
2866 q_vector->num_ring_tx++;
2867 q_vector->tx.itr_idx = map->txitr_idx;
2868 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2869 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2870 q_vector->tx.itr_idx);
2871 }
2872
2873 return VIRTCHNL_STATUS_SUCCESS;
2874}
2875
1071a835
AV
2876/**
2877 * ice_vc_cfg_irq_map_msg
2878 * @vf: pointer to the VF info
2879 * @msg: pointer to the msg buffer
2880 *
2881 * called from the VF to configure the IRQ to queue map
2882 */
2883static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2884{
cf6c6e01 2885 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
0ca469fb 2886 u16 num_q_vectors_mapped, vsi_id, vector_id;
173e23c0 2887 struct virtchnl_irq_map_info *irqmap_info;
1071a835 2888 struct virtchnl_vector_map *map;
1071a835 2889 struct ice_pf *pf = vf->pf;
173e23c0 2890 struct ice_vsi *vsi;
1071a835
AV
2891 int i;
2892
173e23c0 2893 irqmap_info = (struct virtchnl_irq_map_info *)msg;
047e52c0
AV
2894 num_q_vectors_mapped = irqmap_info->num_vectors;
2895
047e52c0
AV
2896 /* Check to make sure number of VF vectors mapped is not greater than
2897 * number of VF vectors originally allocated, and check that
2898 * there is actually at least a single VF queue vector mapped
2899 */
ba0db585 2900 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
46c276ce 2901 pf->num_msix_per_vf < num_q_vectors_mapped ||
0ca469fb 2902 !num_q_vectors_mapped) {
cf6c6e01 2903 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2904 goto error_param;
2905 }
2906
3f416961
A
2907 vsi = pf->vsi[vf->lan_vsi_idx];
2908 if (!vsi) {
2909 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2910 goto error_param;
2911 }
2912
047e52c0
AV
2913 for (i = 0; i < num_q_vectors_mapped; i++) {
2914 struct ice_q_vector *q_vector;
ba0db585 2915
1071a835
AV
2916 map = &irqmap_info->vecmap[i];
2917
2918 vector_id = map->vector_id;
2919 vsi_id = map->vsi_id;
b791cdd5
BC
2920 /* vector_id is always 0-based for each VF, and can never be
2921 * larger than or equal to the max allowed interrupts per VF
2922 */
46c276ce 2923 if (!(vector_id < pf->num_msix_per_vf) ||
b791cdd5 2924 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
047e52c0
AV
2925 (!vector_id && (map->rxq_map || map->txq_map))) {
2926 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2927 goto error_param;
2928 }
2929
2930 /* No need to map VF miscellaneous or rogue vector */
2931 if (!vector_id)
2932 continue;
2933
2934 /* Subtract non queue vector from vector_id passed by VF
2935 * to get actual number of VSI queue vector array index
2936 */
2937 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2938 if (!q_vector) {
cf6c6e01 2939 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2940 goto error_param;
2941 }
2942
 1071a835 2943 /* look out for an invalid queue index */
0ca469fb
MW
2944 v_ret = (enum virtchnl_status_code)
2945 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2946 if (v_ret)
2947 goto error_param;
1071a835
AV
2948 }
2949
1071a835
AV
2950error_param:
2951 /* send the response to the VF */
cf6c6e01 2952 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
1071a835
AV
2953 NULL, 0);
2954}
2955
2956/**
2957 * ice_vc_cfg_qs_msg
2958 * @vf: pointer to the VF info
2959 * @msg: pointer to the msg buffer
2960 *
2961 * called from the VF to configure the Rx/Tx queues
2962 */
2963static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2964{
cf6c6e01 2965 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
2966 struct virtchnl_vsi_queue_config_info *qci =
2967 (struct virtchnl_vsi_queue_config_info *)msg;
2968 struct virtchnl_queue_pair_info *qpi;
77ca27c4 2969 u16 num_rxq = 0, num_txq = 0;
5743020d 2970 struct ice_pf *pf = vf->pf;
1071a835
AV
2971 struct ice_vsi *vsi;
2972 int i;
2973
2974 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 2975 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2976 goto error_param;
2977 }
2978
2979 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
cf6c6e01 2980 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
2981 goto error_param;
2982 }
2983
9c7dd756
MS
2984 vsi = pf->vsi[vf->lan_vsi_idx];
2985 if (!vsi) {
cf6c6e01 2986 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5743020d
AA
2987 goto error_param;
2988 }
2989
0ca469fb 2990 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
9c7dd756 2991 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
19cce2c6 2992 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
9c7dd756 2993 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3f416961
A
2994 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2995 goto error_param;
2996 }
2997
1071a835
AV
2998 for (i = 0; i < qci->num_queue_pairs; i++) {
2999 qpi = &qci->qpair[i];
3000 if (qpi->txq.vsi_id != qci->vsi_id ||
3001 qpi->rxq.vsi_id != qci->vsi_id ||
3002 qpi->rxq.queue_id != qpi->txq.queue_id ||
f8af5bf5 3003 qpi->txq.headwb_enabled ||
9c7dd756
MS
3004 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
3005 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1071a835 3006 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
cf6c6e01 3007 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3008 goto error_param;
3009 }
3010 /* copy Tx queue info from VF into VSI */
77ca27c4
PG
3011 if (qpi->txq.ring_len > 0) {
3012 num_txq++;
3013 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
3014 vsi->tx_rings[i]->count = qpi->txq.ring_len;
1071a835 3015 }
77ca27c4
PG
3016
3017 /* copy Rx queue info from VF into VSI */
3018 if (qpi->rxq.ring_len > 0) {
3019 num_rxq++;
3020 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
3021 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
3022
3023 if (qpi->rxq.databuffer_size != 0 &&
3024 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
3025 qpi->rxq.databuffer_size < 1024)) {
3026 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3027 goto error_param;
3028 }
3029 vsi->rx_buf_len = qpi->rxq.databuffer_size;
3030 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
3031 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
3032 qpi->rxq.max_pkt_size < 64) {
3033 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3034 goto error_param;
3035 }
1071a835 3036 }
77ca27c4 3037
1071a835
AV
3038 vsi->max_frame = qpi->rxq.max_pkt_size;
3039 }
3040
 3041 /* VF can request to configure fewer queues than allocated or the
 3042 * default number of allocated queues, so update the VSI with the new number
 3043 */
77ca27c4
PG
3044 vsi->num_txq = num_txq;
3045 vsi->num_rxq = num_rxq;
105e5bc2 3046 /* All queues of VF VSI are in TC 0 */
77ca27c4
PG
3047 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
3048 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
1071a835 3049
cf6c6e01
MW
3050 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
3051 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
3052
3053error_param:
3054 /* send the response to the VF */
cf6c6e01 3055 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
1071a835
AV
3056 NULL, 0);
3057}
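/* Worked bounds (illustrative): the Rx checks above accept a databuffer size
 * of 0 or anything in [1024, 16 * 1024 - 128] = [1024, 16256] bytes, and a
 * max packet size in [64, 16 * 1024 - 1] = [64, 16383] bytes.
 */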
3058
3059/**
3060 * ice_is_vf_trusted
3061 * @vf: pointer to the VF info
3062 */
3063static bool ice_is_vf_trusted(struct ice_vf *vf)
3064{
3065 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3066}
3067
3068/**
3069 * ice_can_vf_change_mac
3070 * @vf: pointer to the VF info
3071 *
3072 * Return true if the VF is allowed to change its MAC filters, false otherwise
3073 */
3074static bool ice_can_vf_change_mac(struct ice_vf *vf)
3075{
3076 /* If the VF MAC address has been set administratively (via the
3077 * ndo_set_vf_mac command), then deny permission to the VF to
3078 * add/delete unicast MAC addresses, unless the VF is trusted
3079 */
3080 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3081 return false;
3082
3083 return true;
3084}
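/* Example (illustrative, not part of the source): vf->pf_set_mac becomes
 * true once the host administrator pins the address, e.g. via iproute2:
 *
 *	ip link set <pf_ifname> vf 0 mac 02:aa:bb:cc:dd:ee
 *
 * after which an untrusted VF can no longer change its unicast filters.
 */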
3085
ed4c068d
BC
3086/**
3087 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3088 * @vf: pointer to the VF info
3089 * @vsi: pointer to the VF's VSI
3090 * @mac_addr: MAC address to add
3091 */
3092static int
3093ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3094{
3095 struct device *dev = ice_pf_to_dev(vf->pf);
3096 enum ice_status status;
3097
3098 /* default unicast MAC already added */
3099 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3100 return 0;
3101
3102 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3103 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3104 return -EPERM;
3105 }
3106
1b8f15b6 3107 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
ed4c068d
BC
3108 if (status == ICE_ERR_ALREADY_EXISTS) {
3109 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3110 vf->vf_id);
3111 return -EEXIST;
3112 } else if (status) {
0fee3577
LY
 3113 dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3114 mac_addr, vf->vf_id, ice_stat_str(status));
ed4c068d
BC
3115 return -EIO;
3116 }
3117
bf8987df
PG
3118 /* Set the default LAN address to the latest unicast MAC address added
3119 * by the VF. The default LAN address is reported by the PF via
3120 * ndo_get_vf_config.
3121 */
3122 if (is_unicast_ether_addr(mac_addr))
ed4c068d
BC
3123 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3124
3125 vf->num_mac++;
3126
3127 return 0;
3128}
3129
3130/**
3131 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3132 * @vf: pointer to the VF info
3133 * @vsi: pointer to the VF's VSI
3134 * @mac_addr: MAC address to delete
3135 */
3136static int
3137ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3138{
3139 struct device *dev = ice_pf_to_dev(vf->pf);
3140 enum ice_status status;
3141
3142 if (!ice_can_vf_change_mac(vf) &&
3143 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3144 return 0;
3145
1b8f15b6 3146 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
ed4c068d
BC
3147 if (status == ICE_ERR_DOES_NOT_EXIST) {
3148 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3149 vf->vf_id);
3150 return -ENOENT;
3151 } else if (status) {
0fee3577
LY
3152 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3153 mac_addr, vf->vf_id, ice_stat_str(status));
ed4c068d
BC
3154 return -EIO;
3155 }
3156
3157 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3158 eth_zero_addr(vf->dflt_lan_addr.addr);
3159
3160 vf->num_mac--;
3161
3162 return 0;
3163}
3164
1071a835
AV
3165/**
3166 * ice_vc_handle_mac_addr_msg
3167 * @vf: pointer to the VF info
3168 * @msg: pointer to the msg buffer
f9867df6 3169 * @set: true if MAC filters are being set, false otherwise
1071a835 3170 *
df17b7e0 3171 * add guest MAC address filter
1071a835
AV
3172 */
3173static int
3174ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3175{
ed4c068d
BC
3176 int (*ice_vc_cfg_mac)
3177 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
cf6c6e01 3178 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3179 struct virtchnl_ether_addr_list *al =
3180 (struct virtchnl_ether_addr_list *)msg;
3181 struct ice_pf *pf = vf->pf;
3182 enum virtchnl_ops vc_op;
1071a835 3183 struct ice_vsi *vsi;
1071a835
AV
3184 int i;
3185
ed4c068d 3186 if (set) {
1071a835 3187 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
ed4c068d
BC
3188 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3189 } else {
1071a835 3190 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
ed4c068d
BC
3191 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3192 }
1071a835
AV
3193
3194 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3195 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
cf6c6e01 3196 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3197 goto handle_mac_exit;
3198 }
3199
ed4c068d
BC
3200 /* If this VF is not privileged, then we can't add more than a
3201 * limited number of addresses. Check to make sure that the
3202 * additions do not push us over the limit.
3203 */
1071a835
AV
3204 if (set && !ice_is_vf_trusted(vf) &&
3205 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
 19cce2c6 3206 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted; switch the VF to trusted mode in order to add more MAC filters\n",
d84b899a 3207 vf->vf_id);
cf6c6e01 3208 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3209 goto handle_mac_exit;
3210 }
3211
3212 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 3213 if (!vsi) {
cf6c6e01 3214 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
3215 goto handle_mac_exit;
3216 }
1071a835
AV
3217
3218 for (i = 0; i < al->num_elements; i++) {
ed4c068d
BC
3219 u8 *mac_addr = al->list[i].addr;
3220 int result;
1071a835 3221
ed4c068d
BC
3222 if (is_broadcast_ether_addr(mac_addr) ||
3223 is_zero_ether_addr(mac_addr))
3224 continue;
1071a835 3225
ed4c068d
BC
3226 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3227 if (result == -EEXIST || result == -ENOENT) {
3228 continue;
3229 } else if (result) {
3230 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1071a835
AV
3231 goto handle_mac_exit;
3232 }
1071a835
AV
3233 }
3234
1071a835 3235handle_mac_exit:
1071a835 3236 /* send the response to the VF */
cf6c6e01 3237 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
1071a835
AV
3238}
3239
3240/**
3241 * ice_vc_add_mac_addr_msg
3242 * @vf: pointer to the VF info
3243 * @msg: pointer to the msg buffer
3244 *
3245 * add guest MAC address filter
3246 */
3247static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3248{
3249 return ice_vc_handle_mac_addr_msg(vf, msg, true);
3250}
3251
3252/**
3253 * ice_vc_del_mac_addr_msg
3254 * @vf: pointer to the VF info
3255 * @msg: pointer to the msg buffer
3256 *
3257 * remove guest MAC address filter
3258 */
3259static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3260{
3261 return ice_vc_handle_mac_addr_msg(vf, msg, false);
3262}
3263
3264/**
3265 * ice_vc_request_qs_msg
3266 * @vf: pointer to the VF info
3267 * @msg: pointer to the msg buffer
3268 *
3269 * VFs get a default number of queues but can use this message to request a
df17b7e0 3270 * different number. If the request is successful, PF will reset the VF and
 1071a835 3271 * return 0. If unsuccessful, PF will send a message informing the VF of the
 f9867df6 3272 * number of available queue pairs via a virtchnl message response.
1071a835
AV
3273 */
3274static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3275{
cf6c6e01 3276 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3277 struct virtchnl_vf_res_request *vfres =
3278 (struct virtchnl_vf_res_request *)msg;
cbfe31b5 3279 u16 req_queues = vfres->num_queue_pairs;
1071a835 3280 struct ice_pf *pf = vf->pf;
cbfe31b5
PK
3281 u16 max_allowed_vf_queues;
3282 u16 tx_rx_queue_left;
4015d11e 3283 struct device *dev;
4ee656bb 3284 u16 cur_queues;
1071a835 3285
4015d11e 3286 dev = ice_pf_to_dev(pf);
1071a835 3287 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3288 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3289 goto error_param;
3290 }
3291
5743020d 3292 cur_queues = vf->num_vf_qs;
8c243700
AV
3293 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3294 ice_get_avail_rxq_count(pf));
5743020d 3295 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
cbfe31b5 3296 if (!req_queues) {
4015d11e 3297 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
cbfe31b5 3298 vf->vf_id);
0ca469fb 3299 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
4015d11e 3300 dev_err(dev, "VF %d tried to request more than %d queues.\n",
0ca469fb
MW
3301 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3302 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
cbfe31b5
PK
3303 } else if (req_queues > cur_queues &&
3304 req_queues - cur_queues > tx_rx_queue_left) {
19cce2c6 3305 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
1071a835 3306 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
cbfe31b5 3307 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
0ca469fb 3308 ICE_MAX_RSS_QS_PER_VF);
1071a835
AV
3309 } else {
3310 /* request is successful, then reset VF */
3311 vf->num_req_qs = req_queues;
ff010eca 3312 ice_vc_reset_vf(vf);
4015d11e 3313 dev_info(dev, "VF %d granted request of %u queues.\n",
1071a835
AV
3314 vf->vf_id, req_queues);
3315 return 0;
3316 }
3317
3318error_param:
3319 /* send the response to the VF */
3320 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
cf6c6e01 3321 v_ret, (u8 *)vfres, sizeof(*vfres));
1071a835
AV
3322}
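/* Worked example (illustrative): a VF that owns 4 queue pairs and requests 8
 * is granted the request and reset, provided at least 4 more Tx/Rx queues
 * are still available on the PF; otherwise the reply's num_queue_pairs tells
 * the VF how many pairs it can realistically ask for.
 */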
3323
7c710869
AV
3324/**
3325 * ice_set_vf_port_vlan
3326 * @netdev: network interface device structure
3327 * @vf_id: VF identifier
f9867df6 3328 * @vlan_id: VLAN ID being set
7c710869
AV
3329 * @qos: priority setting
3330 * @vlan_proto: VLAN protocol
3331 *
f9867df6 3332 * program VF Port VLAN ID and/or QoS
7c710869
AV
3333 */
3334int
3335ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3336 __be16 vlan_proto)
3337{
4c66d227 3338 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3339 struct ice_vsi *vsi;
4015d11e 3340 struct device *dev;
7c710869 3341 struct ice_vf *vf;
61c9ce86 3342 u16 vlanprio;
c54d209c 3343 int ret;
7c710869 3344
4015d11e 3345 dev = ice_pf_to_dev(pf);
4c66d227 3346 if (ice_validate_vf_id(pf, vf_id))
7c710869 3347 return -EINVAL;
7c710869 3348
61c9ce86
BC
3349 if (vlan_id >= VLAN_N_VID || qos > 7) {
3350 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3351 vf_id, vlan_id, qos);
7c710869
AV
3352 return -EINVAL;
3353 }
3354
3355 if (vlan_proto != htons(ETH_P_8021Q)) {
4015d11e 3356 dev_err(dev, "VF VLAN protocol is not supported\n");
7c710869
AV
3357 return -EPROTONOSUPPORT;
3358 }
3359
3360 vf = &pf->vf[vf_id];
3361 vsi = pf->vsi[vf->lan_vsi_idx];
c54d209c
BC
3362
3363 ret = ice_check_vf_ready_for_cfg(vf);
3364 if (ret)
3365 return ret;
7c710869 3366
61c9ce86
BC
3367 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3368
3369 if (vf->port_vlan_info == vlanprio) {
7c710869 3370 /* duplicate request, so just return success */
4015d11e 3371 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
c54d209c 3372 return 0;
7c710869
AV
3373 }
3374
7c710869 3375 if (vlan_id || qos) {
72634bc2
BC
3376 /* remove VLAN 0 filter set by default when transitioning from
3377 * no port VLAN to a port VLAN. No change to old port VLAN on
3378 * failure.
3379 */
3380 ret = ice_vsi_kill_vlan(vsi, 0);
3381 if (ret)
3382 return ret;
77a7a84d 3383 ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
7c710869 3384 if (ret)
72634bc2 3385 return ret;
7c710869 3386 } else {
72634bc2
BC
3387 /* add VLAN 0 filter back when transitioning from port VLAN to
3388 * no port VLAN. No change to old port VLAN on failure.
3389 */
1b8f15b6 3390 ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
72634bc2
BC
3391 if (ret)
3392 return ret;
b093841f
BC
3393 ret = ice_vsi_manage_pvid(vsi, 0, false);
3394 if (ret)
e65ee2fb 3395 return ret;
7c710869
AV
3396 }
3397
3398 if (vlan_id) {
4015d11e 3399 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
7c710869
AV
3400 vlan_id, qos, vf_id);
3401
72634bc2 3402 /* add VLAN filter for the port VLAN */
1b8f15b6 3403 ret = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
7c710869 3404 if (ret)
c54d209c 3405 return ret;
7c710869 3406 }
72634bc2
BC
3407 /* remove old port VLAN filter with valid VLAN ID or QoS fields */
3408 if (vf->port_vlan_info)
3409 ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
7c710869 3410
72634bc2 3411 /* keep port VLAN information persistent on resets */
b093841f 3412 vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
7c710869 3413
c54d209c 3414 return 0;
7c710869
AV
3415}
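
/* Editor's sketch (not driver code): how the vlanprio word above packs
 * the 12-bit VLAN ID and 3-bit QoS/PCP into one 802.1Q TCI-style value.
 * Constants match include/linux/if_vlan.h. Standalone user-space C.
 */
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff
#define VLAN_PRIO_SHIFT	13
#define VLAN_PRIO_MASK	0xe000

int main(void)
{
	uint16_t vlan_id = 100, qos = 5;
	uint16_t vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);

	/* prints: tci=0xa064 vid=100 qos=5 */
	printf("tci=0x%04x vid=%u qos=%u\n", vlanprio,
	       vlanprio & VLAN_VID_MASK,
	       (vlanprio & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
	return 0;
}
/* Administrators reach ice_set_vf_port_vlan() from user space via
 * "ip link set <pf-netdev> vf <id> vlan <vid> qos <qos>".
 */
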
3416
d4bc4e2d
BC
3417/**
3418 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3419 * @caps: VF driver negotiated capabilities
3420 *
3421 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3422 */
3423static bool ice_vf_vlan_offload_ena(u32 caps)
3424{
3425 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3426}
3427
1071a835
AV
3428/**
3429 * ice_vc_process_vlan_msg
3430 * @vf: pointer to the VF info
3431 * @msg: pointer to the msg buffer
3432 * @add_v: Add VLAN if true, otherwise delete VLAN
3433 *
f9867df6 3434 * Process virtchnl op to add or remove programmed guest VLAN ID
1071a835
AV
3435 */
3436static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3437{
cf6c6e01 3438 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3439 struct virtchnl_vlan_filter_list *vfl =
3440 (struct virtchnl_vlan_filter_list *)msg;
1071a835 3441 struct ice_pf *pf = vf->pf;
5eda8afd 3442 bool vlan_promisc = false;
1071a835 3443 struct ice_vsi *vsi;
4015d11e 3444 struct device *dev;
5eda8afd
AA
3445 struct ice_hw *hw;
3446 int status = 0;
3447 u8 promisc_m;
1071a835
AV
3448 int i;
3449
4015d11e 3450 dev = ice_pf_to_dev(pf);
1071a835 3451 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3452 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3453 goto error_param;
3454 }
3455
d4bc4e2d
BC
3456 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3457 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3458 goto error_param;
3459 }
3460
1071a835 3461 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
cf6c6e01 3462 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3463 goto error_param;
3464 }
3465
1071a835 3466 for (i = 0; i < vfl->num_elements; i++) {
61c9ce86 3467 if (vfl->vlan_id[i] >= VLAN_N_VID) {
cf6c6e01 3468 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6
AV
3469 dev_err(dev, "invalid VF VLAN id %d\n",
3470 vfl->vlan_id[i]);
1071a835
AV
3471 goto error_param;
3472 }
3473 }
3474
5eda8afd 3475 hw = &pf->hw;
f1ef73f5 3476 vsi = pf->vsi[vf->lan_vsi_idx];
1071a835 3477 if (!vsi) {
cf6c6e01 3478 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3479 goto error_param;
3480 }
3481
cd6d6b83
BC
3482 if (add_v && !ice_is_vf_trusted(vf) &&
3483 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
19cce2c6 3484 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
cd6d6b83
BC
3485 vf->vf_id);
3486 /* There is no need to let VF know about being not trusted,
3487 * so we can just return success message here
3488 */
3489 goto error_param;
3490 }
3491
1071a835 3492 if (vsi->info.pvid) {
cf6c6e01 3493 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3494 goto error_param;
3495 }
3496
01b5e89a
BC
3497 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3498 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3499 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
5eda8afd
AA
3500 vlan_promisc = true;
3501
1071a835
AV
3502 if (add_v) {
3503 for (i = 0; i < vfl->num_elements; i++) {
3504 u16 vid = vfl->vlan_id[i];
3505
5079b853 3506 if (!ice_is_vf_trusted(vf) &&
cd6d6b83 3507 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
19cce2c6 3508 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
5079b853
AA
3509 vf->vf_id);
3510 /* There is no need to let VF know about being
3511 * not trusted, so we can just return success
3512 * message here as well.
3513 */
3514 goto error_param;
3515 }
3516
cd6d6b83
BC
3517 /* we add VLAN 0 by default for each VF so we can enable
 3518 * Tx VLAN anti-spoof without triggering MDD events, so
3519 * we don't need to add it again here
3520 */
3521 if (!vid)
3522 continue;
3523
1b8f15b6 3524 status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
cd6d6b83 3525 if (status) {
cf6c6e01 3526 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5eda8afd
AA
3527 goto error_param;
3528 }
1071a835 3529
42f3efef
BC
 3530 /* Enable VLAN pruning when a non-zero VLAN is added */
3531 if (!vlan_promisc && vid &&
3532 !ice_vsi_is_vlan_pruning_ena(vsi)) {
5eda8afd
AA
3533 status = ice_cfg_vlan_pruning(vsi, true, false);
3534 if (status) {
cf6c6e01 3535 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 3536 dev_err(dev, "Enabling VLAN pruning on VLAN ID %d failed, error %d\n",
5eda8afd
AA
3537 vid, status);
3538 goto error_param;
3539 }
42f3efef 3540 } else if (vlan_promisc) {
5eda8afd
AA
3541 /* Enable Ucast/Mcast VLAN promiscuous mode */
3542 promisc_m = ICE_PROMISC_VLAN_TX |
3543 ICE_PROMISC_VLAN_RX;
3544
3545 status = ice_set_vsi_promisc(hw, vsi->idx,
3546 promisc_m, vid);
cf6c6e01
MW
3547 if (status) {
3548 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
19cce2c6 3549 dev_err(dev, "Enabling unicast/multicast promiscuous mode on VLAN ID %d failed, error %d\n",
5eda8afd 3550 vid, status);
cf6c6e01 3551 }
1071a835
AV
3552 }
3553 }
3554 } else {
bb877b22
AA
 3555 /* An untrusted VF may pass more VLAN elements for removal
 3556 * than the number of VLAN filters actually programmed for
 3557 * it, so bound the loop by the number of VLANs added
 3558 * earlier with the add VLAN opcode. This avoids removing a
 3559 * VLAN that doesn't exist, which would send an erroneous
 3560 * failure message back to the VF.
 3561 */
3562 int num_vf_vlan;
3563
cd6d6b83 3564 num_vf_vlan = vsi->num_vlan;
bb877b22 3565 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
1071a835
AV
3566 u16 vid = vfl->vlan_id[i];
3567
cd6d6b83
BC
3568 /* we add VLAN 0 by default for each VF so we can enable
 3569 * Tx VLAN anti-spoof without triggering MDD events, so
3570 * we don't want a VIRTCHNL request to remove it
3571 */
3572 if (!vid)
3573 continue;
3574
1071a835
AV
3575 /* Make sure ice_vsi_kill_vlan is successful before
3576 * updating VLAN information
3577 */
cd6d6b83
BC
3578 status = ice_vsi_kill_vlan(vsi, vid);
3579 if (status) {
cf6c6e01 3580 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
5eda8afd
AA
3581 goto error_param;
3582 }
3583
42f3efef
BC
3584 /* Disable VLAN pruning when only VLAN 0 is left */
3585 if (vsi->num_vlan == 1 &&
3586 ice_vsi_is_vlan_pruning_ena(vsi))
cd186e51 3587 ice_cfg_vlan_pruning(vsi, false, false);
5eda8afd
AA
3588
3589 /* Disable Unicast/Multicast VLAN promiscuous mode */
3590 if (vlan_promisc) {
3591 promisc_m = ICE_PROMISC_VLAN_TX |
3592 ICE_PROMISC_VLAN_RX;
1071a835 3593
5eda8afd
AA
3594 ice_clear_vsi_promisc(hw, vsi->idx,
3595 promisc_m, vid);
1071a835
AV
3596 }
3597 }
3598 }
3599
3600error_param:
3601 /* send the response to the VF */
3602 if (add_v)
cf6c6e01 3603 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
1071a835
AV
3604 NULL, 0);
3605 else
cf6c6e01 3606 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
1071a835
AV
3607 NULL, 0);
3608}
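
/* Editor's sketch (not driver code): the variable-length list a VF sends
 * with VIRTCHNL_OP_ADD_VLAN / VIRTCHNL_OP_DEL_VLAN to reach the handler
 * above. The struct mirrors virtchnl_vlan_filter_list (layout assumed
 * from include/linux/avf/virtchnl.h); no AdminQ transport is shown.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct vlan_filter_list {
	uint16_t vsi_id;
	uint16_t num_elements;
	uint16_t vlan_id[];	/* num_elements VLAN IDs, each < 4096 */
};

static struct vlan_filter_list *build_vlan_msg(uint16_t vsi_id,
					       const uint16_t *vids, uint16_t n)
{
	struct vlan_filter_list *vfl =
		malloc(sizeof(*vfl) + n * sizeof(vfl->vlan_id[0]));

	if (!vfl)
		return NULL;
	vfl->vsi_id = vsi_id;
	vfl->num_elements = n;
	memcpy(vfl->vlan_id, vids, n * sizeof(vfl->vlan_id[0]));
	return vfl;
}

int main(void)
{
	const uint16_t vids[] = { 10, 20 };
	struct vlan_filter_list *msg = build_vlan_msg(3, vids, 2);

	free(msg);
	return 0;
}
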
3609
3610/**
3611 * ice_vc_add_vlan_msg
3612 * @vf: pointer to the VF info
3613 * @msg: pointer to the msg buffer
3614 *
f9867df6 3615 * Add and program guest VLAN ID
1071a835
AV
3616 */
3617static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3618{
3619 return ice_vc_process_vlan_msg(vf, msg, true);
3620}
3621
3622/**
3623 * ice_vc_remove_vlan_msg
3624 * @vf: pointer to the VF info
3625 * @msg: pointer to the msg buffer
3626 *
f9867df6 3627 * remove programmed guest VLAN ID
1071a835
AV
3628 */
3629static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3630{
3631 return ice_vc_process_vlan_msg(vf, msg, false);
3632}
3633
3634/**
3635 * ice_vc_ena_vlan_stripping
3636 * @vf: pointer to the VF info
3637 *
3638 * Enable VLAN header stripping for a given VF
3639 */
3640static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3641{
cf6c6e01 3642 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3643 struct ice_pf *pf = vf->pf;
3644 struct ice_vsi *vsi;
3645
3646 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3647 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3648 goto error_param;
3649 }
3650
d4bc4e2d
BC
3651 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3652 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3653 goto error_param;
3654 }
3655
1071a835
AV
3656 vsi = pf->vsi[vf->lan_vsi_idx];
3657 if (ice_vsi_manage_vlan_stripping(vsi, true))
cf6c6e01 3658 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3659
3660error_param:
3661 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
cf6c6e01 3662 v_ret, NULL, 0);
1071a835
AV
3663}
3664
3665/**
3666 * ice_vc_dis_vlan_stripping
3667 * @vf: pointer to the VF info
3668 *
3669 * Disable VLAN header stripping for a given VF
3670 */
3671static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3672{
cf6c6e01 3673 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1071a835
AV
3674 struct ice_pf *pf = vf->pf;
3675 struct ice_vsi *vsi;
3676
3677 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
cf6c6e01 3678 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3679 goto error_param;
3680 }
3681
d4bc4e2d
BC
3682 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3683 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3684 goto error_param;
3685 }
3686
1071a835 3687 vsi = pf->vsi[vf->lan_vsi_idx];
f1ef73f5 3688 if (!vsi) {
cf6c6e01 3689 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
f1ef73f5
AA
3690 goto error_param;
3691 }
3692
1071a835 3693 if (ice_vsi_manage_vlan_stripping(vsi, false))
cf6c6e01 3694 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1071a835
AV
3695
3696error_param:
3697 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
cf6c6e01 3698 v_ret, NULL, 0);
1071a835
AV
3699}
3700
2f9ec241
BC
3701/**
3702 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3703 * @vf: VF to enable/disable VLAN stripping for on initialization
3704 *
 3705 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping;
 3706 * otherwise disable it. For example, the flag
3707 * will be cleared when port VLANs are configured by the administrator before
3708 * passing the VF to the guest or if the AVF driver doesn't support VLAN
3709 * offloads.
3710 */
3711static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3712{
3713 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3714
3715 if (!vsi)
3716 return -EINVAL;
3717
3718 /* don't modify stripping if port VLAN is configured */
3719 if (vsi->info.pvid)
3720 return 0;
3721
3722 if (ice_vf_vlan_offload_ena(vf->driver_caps))
3723 return ice_vsi_manage_vlan_stripping(vsi, true);
3724 else
3725 return ice_vsi_manage_vlan_stripping(vsi, false);
3726}
3727
1071a835
AV
3728/**
3729 * ice_vc_process_vf_msg - Process request from VF
3730 * @pf: pointer to the PF structure
3731 * @event: pointer to the AQ event
3732 *
 3733 * Called from the common ASQ/ARQ handler to
 3734 * process a request from a VF
3735 */
3736void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3737{
3738 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3739 s16 vf_id = le16_to_cpu(event->desc.retval);
3740 u16 msglen = event->msg_len;
3741 u8 *msg = event->msg_buf;
3742 struct ice_vf *vf = NULL;
4015d11e 3743 struct device *dev;
1071a835
AV
3744 int err = 0;
3745
4015d11e 3746 dev = ice_pf_to_dev(pf);
4c66d227 3747 if (ice_validate_vf_id(pf, vf_id)) {
1071a835
AV
3748 err = -EINVAL;
3749 goto error_handler;
3750 }
3751
3752 vf = &pf->vf[vf_id];
3753
3754 /* Check if VF is disabled. */
3755 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3756 err = -EPERM;
3757 goto error_handler;
3758 }
3759
3760 /* Perform basic checks on the msg */
3761 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3762 if (err) {
cf6c6e01 3763 if (err == VIRTCHNL_STATUS_ERR_PARAM)
1071a835
AV
3764 err = -EPERM;
3765 else
3766 err = -EINVAL;
1071a835
AV
3767 }
3768
3769error_handler:
3770 if (err) {
cf6c6e01
MW
3771 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3772 NULL, 0);
4015d11e 3773 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
1071a835
AV
3774 vf_id, v_opcode, msglen, err);
3775 return;
3776 }
3777
3778 switch (v_opcode) {
3779 case VIRTCHNL_OP_VERSION:
3780 err = ice_vc_get_ver_msg(vf, msg);
3781 break;
3782 case VIRTCHNL_OP_GET_VF_RESOURCES:
3783 err = ice_vc_get_vf_res_msg(vf, msg);
2f9ec241 3784 if (ice_vf_init_vlan_stripping(vf))
19cce2c6 3785 dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
2f9ec241 3786 vf->vf_id);
dfc62400 3787 ice_vc_notify_vf_link_state(vf);
1071a835
AV
3788 break;
3789 case VIRTCHNL_OP_RESET_VF:
3790 ice_vc_reset_vf_msg(vf);
3791 break;
3792 case VIRTCHNL_OP_ADD_ETH_ADDR:
3793 err = ice_vc_add_mac_addr_msg(vf, msg);
3794 break;
3795 case VIRTCHNL_OP_DEL_ETH_ADDR:
3796 err = ice_vc_del_mac_addr_msg(vf, msg);
3797 break;
3798 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3799 err = ice_vc_cfg_qs_msg(vf, msg);
3800 break;
3801 case VIRTCHNL_OP_ENABLE_QUEUES:
3802 err = ice_vc_ena_qs_msg(vf, msg);
3803 ice_vc_notify_vf_link_state(vf);
3804 break;
3805 case VIRTCHNL_OP_DISABLE_QUEUES:
3806 err = ice_vc_dis_qs_msg(vf, msg);
3807 break;
3808 case VIRTCHNL_OP_REQUEST_QUEUES:
3809 err = ice_vc_request_qs_msg(vf, msg);
3810 break;
3811 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3812 err = ice_vc_cfg_irq_map_msg(vf, msg);
3813 break;
3814 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3815 err = ice_vc_config_rss_key(vf, msg);
3816 break;
3817 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3818 err = ice_vc_config_rss_lut(vf, msg);
3819 break;
3820 case VIRTCHNL_OP_GET_STATS:
3821 err = ice_vc_get_stats_msg(vf, msg);
3822 break;
01b5e89a
BC
3823 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3824 err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3825 break;
1071a835
AV
3826 case VIRTCHNL_OP_ADD_VLAN:
3827 err = ice_vc_add_vlan_msg(vf, msg);
3828 break;
3829 case VIRTCHNL_OP_DEL_VLAN:
3830 err = ice_vc_remove_vlan_msg(vf, msg);
3831 break;
3832 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3833 err = ice_vc_ena_vlan_stripping(vf);
3834 break;
3835 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3836 err = ice_vc_dis_vlan_stripping(vf);
3837 break;
3838 case VIRTCHNL_OP_UNKNOWN:
3839 default:
4015d11e
BC
3840 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3841 vf_id);
cf6c6e01
MW
3842 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3843 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
1071a835
AV
3844 NULL, 0);
3845 break;
3846 }
3847 if (err) {
 3848 /* Errors from the opcode handlers are only logged here; the ARQ
 3849 * handler must keep servicing pending messages.
3850 */
4015d11e 3851 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
1071a835
AV
3852 vf_id, v_opcode, err);
3853 }
3854}
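
/* Editor's sketch (not driver code): the opcode switch above is a
 * dispatch table in disguise; the same shape expressed as data.
 * Opcode values and handler names here are placeholders, not the
 * driver's. Standalone user-space C.
 */
#include <stddef.h>

enum vc_op { VC_OP_VERSION, VC_OP_RESET, VC_OP_ADD_VLAN, VC_OP_MAX };

typedef int (*vc_handler)(void *vf, unsigned char *msg);

static int handle_version(void *vf, unsigned char *msg)  { return 0; }
static int handle_reset(void *vf, unsigned char *msg)    { return 0; }
static int handle_add_vlan(void *vf, unsigned char *msg) { return 0; }

static const vc_handler vc_handlers[VC_OP_MAX] = {
	[VC_OP_VERSION]  = handle_version,
	[VC_OP_RESET]    = handle_reset,
	[VC_OP_ADD_VLAN] = handle_add_vlan,
};

static int vc_dispatch(enum vc_op op, void *vf, unsigned char *msg)
{
	if (op >= VC_OP_MAX || !vc_handlers[op])
		return -1;	/* cf. VIRTCHNL_STATUS_ERR_NOT_SUPPORTED */
	return vc_handlers[op](vf, msg);
}

int main(void)
{
	return vc_dispatch(VC_OP_VERSION, NULL, NULL);
}
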
3855
7c710869
AV
3856/**
3857 * ice_get_vf_cfg
3858 * @netdev: network interface device structure
3859 * @vf_id: VF identifier
3860 * @ivi: VF configuration structure
3861 *
3862 * return VF configuration
3863 */
c8b7abdd
BA
3864int
3865ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
7c710869 3866{
4c66d227 3867 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869
AV
3868 struct ice_vf *vf;
3869
4c66d227 3870 if (ice_validate_vf_id(pf, vf_id))
7c710869 3871 return -EINVAL;
7c710869
AV
3872
3873 vf = &pf->vf[vf_id];
7c710869 3874
4c66d227 3875 if (ice_check_vf_init(pf, vf))
7c710869 3876 return -EBUSY;
7c710869
AV
3877
3878 ivi->vf = vf_id;
3879 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3880
3881 /* VF configuration for VLAN and applicable QoS */
61c9ce86
BC
3882 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3883 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
7c710869
AV
3884
3885 ivi->trusted = vf->trusted;
3886 ivi->spoofchk = vf->spoofchk;
3887 if (!vf->link_forced)
3888 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3889 else if (vf->link_up)
3890 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3891 else
3892 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3893 ivi->max_tx_rate = vf->tx_rate;
3894 ivi->min_tx_rate = 0;
3895 return 0;
3896}
3897
47ebc7b0
BC
3898/**
3899 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
3900 * @pf: PF used to reference the switch's rules
3901 * @umac: unicast MAC to compare against existing switch rules
3902 *
3903 * Return true on the first/any match, else return false
3904 */
3905static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3906{
3907 struct ice_sw_recipe *mac_recipe_list =
3908 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3909 struct ice_fltr_mgmt_list_entry *list_itr;
3910 struct list_head *rule_head;
3911 struct mutex *rule_lock; /* protect MAC filter list access */
3912
3913 rule_head = &mac_recipe_list->filt_rules;
3914 rule_lock = &mac_recipe_list->filt_rule_lock;
3915
3916 mutex_lock(rule_lock);
3917 list_for_each_entry(list_itr, rule_head, list_entry) {
3918 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3919
3920 if (ether_addr_equal(existing_mac, umac)) {
3921 mutex_unlock(rule_lock);
3922 return true;
3923 }
3924 }
3925
3926 mutex_unlock(rule_lock);
3927
3928 return false;
3929}
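
/* Editor's sketch (not driver code): the lookup above is a plain
 * walk-and-compare under the filter-list mutex. A user-space analogue
 * of the same pattern, with a 6-byte memcmp standing in for
 * ether_addr_equal(). Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <string.h>

struct mac_node {
	unsigned char addr[6];
	struct mac_node *next;
};

static struct mac_node *filt_rules;
static pthread_mutex_t filt_rule_lock = PTHREAD_MUTEX_INITIALIZER;

static bool unicast_mac_exists(const unsigned char *umac)
{
	struct mac_node *n;
	bool found = false;

	pthread_mutex_lock(&filt_rule_lock);
	for (n = filt_rules; n; n = n->next)
		if (!memcmp(n->addr, umac, 6)) {	/* ether_addr_equal() */
			found = true;
			break;
		}
	pthread_mutex_unlock(&filt_rule_lock);
	return found;
}

int main(void)
{
	static struct mac_node node = { { 0x02, 0, 0, 0, 0, 1 }, NULL };

	filt_rules = &node;
	return unicast_mac_exists(node.addr) ? 0 : 1;
}
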
3930
7c710869
AV
3931/**
3932 * ice_set_vf_mac
3933 * @netdev: network interface device structure
3934 * @vf_id: VF identifier
f9867df6 3935 * @mac: MAC address
7c710869 3936 *
f9867df6 3937 * program VF MAC address
7c710869
AV
3938 */
3939int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3940{
4c66d227 3941 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3942 struct ice_vf *vf;
c54d209c 3943 int ret;
7c710869 3944
4c66d227 3945 if (ice_validate_vf_id(pf, vf_id))
7c710869 3946 return -EINVAL;
7c710869 3947
7c710869
AV
3948 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
3949 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3950 return -EINVAL;
3951 }
3952
c54d209c 3953 vf = &pf->vf[vf_id];
47ebc7b0
BC
3954 /* nothing left to do, unicast MAC already set */
3955 if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3956 return 0;
3957
c54d209c
BC
3958 ret = ice_check_vf_ready_for_cfg(vf);
3959 if (ret)
3960 return ret;
3961
47ebc7b0
BC
3962 if (ice_unicast_mac_exists(pf, mac)) {
 3963 netdev_err(netdev, "Unicast MAC %pM already exists on this PF; refusing to set VF %u unicast MAC address to %pM\n",
3964 mac, vf_id, mac);
3965 return -EINVAL;
3966 }
3967
f9867df6 3968 /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
7c710869
AV
3969 * flow will use the updated dflt_lan_addr and add a MAC filter
3970 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
3971 * set the MAC address for this VF.
3972 */
3973 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3974 vf->pf_set_mac = true;
19cce2c6 3975 netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
7c710869
AV
3976 vf_id, mac);
3977
ff010eca 3978 ice_vc_reset_vf(vf);
c54d209c 3979 return 0;
7c710869
AV
3980}
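
/* Editor's sketch (not driver code): the sanity check at the top of
 * ice_set_vf_mac() rejects the all-zero address and any multicast
 * address (I/G bit, bit 0 of the first octet). The same test in
 * standalone user-space C.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_valid_unicast(const unsigned char mac[6])
{
	bool all_zero = true;
	int i;

	for (i = 0; i < 6; i++)
		if (mac[i])
			all_zero = false;

	/* multicast (and broadcast) addresses have the I/G bit set */
	return !all_zero && !(mac[0] & 0x01);
}

int main(void)
{
	const unsigned char good[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("%d %d\n", is_valid_unicast(good), is_valid_unicast(mcast));
	return 0;
}
/* This path is reached from user space via
 * "ip link set <pf-netdev> vf <id> mac <addr>".
 */
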
3981
3982/**
3983 * ice_set_vf_trust
3984 * @netdev: network interface device structure
3985 * @vf_id: VF identifier
3986 * @trusted: Boolean value to enable/disable trusted VF
3987 *
3988 * Enable or disable a given VF as trusted
3989 */
3990int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3991{
4c66d227 3992 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 3993 struct ice_vf *vf;
c54d209c 3994 int ret;
7c710869 3995
4c66d227 3996 if (ice_validate_vf_id(pf, vf_id))
7c710869 3997 return -EINVAL;
7c710869
AV
3998
3999 vf = &pf->vf[vf_id];
c54d209c
BC
4000 ret = ice_check_vf_ready_for_cfg(vf);
4001 if (ret)
4002 return ret;
7c710869
AV
4003
4004 /* Check if already trusted */
4005 if (trusted == vf->trusted)
4006 return 0;
4007
4008 vf->trusted = trusted;
ff010eca 4009 ice_vc_reset_vf(vf);
19cce2c6 4010 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
7c710869
AV
4011 vf_id, trusted ? "" : "un");
4012
4013 return 0;
4014}
4015
4016/**
4017 * ice_set_vf_link_state
4018 * @netdev: network interface device structure
4019 * @vf_id: VF identifier
4020 * @link_state: required link state
4021 *
4022 * Set VF's link state, irrespective of physical link state status
4023 */
4024int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
4025{
4c66d227 4026 struct ice_pf *pf = ice_netdev_to_pf(netdev);
7c710869 4027 struct ice_vf *vf;
c54d209c 4028 int ret;
7c710869 4029
4c66d227 4030 if (ice_validate_vf_id(pf, vf_id))
7c710869 4031 return -EINVAL;
7c710869
AV
4032
4033 vf = &pf->vf[vf_id];
c54d209c
BC
4034 ret = ice_check_vf_ready_for_cfg(vf);
4035 if (ret)
4036 return ret;
7c710869 4037
7c710869
AV
4038 switch (link_state) {
4039 case IFLA_VF_LINK_STATE_AUTO:
4040 vf->link_forced = false;
7c710869
AV
4041 break;
4042 case IFLA_VF_LINK_STATE_ENABLE:
4043 vf->link_forced = true;
4044 vf->link_up = true;
4045 break;
4046 case IFLA_VF_LINK_STATE_DISABLE:
4047 vf->link_forced = true;
4048 vf->link_up = false;
4049 break;
4050 default:
4051 return -EINVAL;
4052 }
4053
26a91525 4054 ice_vc_notify_vf_link_state(vf);
7c710869
AV
4055
4056 return 0;
4057}
730fdea4
JB
4058
4059/**
4060 * ice_get_vf_stats - populate some stats for the VF
4061 * @netdev: the netdev of the PF
4062 * @vf_id: the host OS identifier (0-255)
4063 * @vf_stats: pointer to the OS memory to be initialized
4064 */
4065int ice_get_vf_stats(struct net_device *netdev, int vf_id,
4066 struct ifla_vf_stats *vf_stats)
4067{
4068 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4069 struct ice_eth_stats *stats;
4070 struct ice_vsi *vsi;
4071 struct ice_vf *vf;
c54d209c 4072 int ret;
730fdea4
JB
4073
4074 if (ice_validate_vf_id(pf, vf_id))
4075 return -EINVAL;
4076
4077 vf = &pf->vf[vf_id];
c54d209c
BC
4078 ret = ice_check_vf_ready_for_cfg(vf);
4079 if (ret)
4080 return ret;
730fdea4
JB
4081
4082 vsi = pf->vsi[vf->lan_vsi_idx];
4083 if (!vsi)
4084 return -EINVAL;
4085
4086 ice_update_eth_stats(vsi);
4087 stats = &vsi->eth_stats;
4088
4089 memset(vf_stats, 0, sizeof(*vf_stats));
4090
4091 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4092 stats->rx_multicast;
4093 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4094 stats->tx_multicast;
4095 vf_stats->rx_bytes = stats->rx_bytes;
4096 vf_stats->tx_bytes = stats->tx_bytes;
4097 vf_stats->broadcast = stats->rx_broadcast;
4098 vf_stats->multicast = stats->rx_multicast;
4099 vf_stats->rx_dropped = stats->rx_discards;
4100 vf_stats->tx_dropped = stats->tx_discards;
4101
4102 return 0;
4103}
9d5c5a52 4104
7438a3b0
PG
4105/**
4106 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4107 * @vf: pointer to the VF structure
4108 */
4109void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4110{
4111 struct ice_pf *pf = vf->pf;
4112 struct device *dev;
4113
4114 dev = ice_pf_to_dev(pf);
4115
4116 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4117 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4118 vf->dflt_lan_addr.addr,
4119 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4120 ? "on" : "off");
4121}
4122
9d5c5a52
PG
4123/**
 4124 * ice_print_vfs_mdd_events - print VFs malicious driver detect events
4125 * @pf: pointer to the PF structure
4126 *
 4127 * Called from ice_handle_mdd_event to rate limit and print VF MDD events.
4128 */
4129void ice_print_vfs_mdd_events(struct ice_pf *pf)
4130{
4131 struct device *dev = ice_pf_to_dev(pf);
4132 struct ice_hw *hw = &pf->hw;
4133 int i;
4134
4135 /* check that there are pending MDD events to print */
4136 if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
4137 return;
4138
4139 /* VF MDD event logs are rate limited to one second intervals */
4140 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4141 return;
4142
4143 pf->last_printed_mdd_jiffies = jiffies;
4144
4145 ice_for_each_vf(pf, i) {
4146 struct ice_vf *vf = &pf->vf[i];
4147
4148 /* only print Rx MDD event message if there are new events */
4149 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4150 vf->mdd_rx_events.last_printed =
4151 vf->mdd_rx_events.count;
7438a3b0 4152 ice_print_vf_rx_mdd_event(vf);
9d5c5a52
PG
4153 }
4154
4155 /* only print Tx MDD event message if there are new events */
4156 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4157 vf->mdd_tx_events.last_printed =
4158 vf->mdd_tx_events.count;
4159
4160 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4161 vf->mdd_tx_events.count, hw->pf_id, i,
4162 vf->dflt_lan_addr.addr);
4163 }
4164 }
4165}
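
/* Editor's sketch (not driver code): the once-per-second gate above in a
 * user-space analogue. time_is_after_jiffies(last + HZ) is true while
 * less than a second has elapsed, so the print is skipped; here
 * CLOCK_MONOTONIC stands in for jiffies (second granularity, so coarser
 * than the original).
 */
#include <stdbool.h>
#include <time.h>

static time_t last_printed;	/* 0 until the first print */

static bool mdd_print_allowed(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (last_printed && now.tv_sec - last_printed < 1)
		return false;	/* rate limited: under a second elapsed */
	last_printed = now.tv_sec;
	return true;
}

int main(void)
{
	return mdd_print_allowed() ? 0 : 1;	/* first call is allowed */
}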