]>
Commit | Line | Data |
---|---|---|
5c3c48ac JB |
1 | /******************************************************************************* |
2 | * | |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | |
dc641b73 | 4 | * Copyright(c) 2013 - 2014 Intel Corporation. |
5c3c48ac JB |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms and conditions of the GNU General Public License, | |
8 | * version 2, as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope it will be useful, but WITHOUT | |
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | * more details. | |
14 | * | |
dc641b73 GR |
15 | * You should have received a copy of the GNU General Public License along |
16 | * with this program. If not, see <http://www.gnu.org/licenses/>. | |
5c3c48ac JB |
17 | * |
18 | * The full GNU General Public License is included in this distribution in | |
19 | * the file called "COPYING". | |
20 | * | |
21 | * Contact Information: | |
22 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
23 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
24 | * | |
25 | ******************************************************************************/ | |
26 | ||
27 | #include "i40e.h" | |
28 | ||
29 | /***********************misc routines*****************************/ | |
30 | ||
f9b4b627 GR |
/**
 * i40e_vc_disable_vf
 * @pf: pointer to the pf info
 * @vf: pointer to the vf info
 *
 * Disable the VF by triggering a software reset through the
 * VPGEN_VFRTRIG register; the VF is quiesced until the reset completes.
 **/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* read-modify-write so other VFRTRIG bits are preserved */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	i40e_flush(hw);
}
48 | ||
5c3c48ac JB |
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 *
 * NOTE(review): @vsi_id indexes pf->vsi[] with no range check here;
 * presumably the virtchnl dispatcher guarantees it is in range --
 * confirm every caller validates the id before calling.
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	/* the VSI is valid only if it is owned by this VF */
	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}
62 | ||
/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 *
 * NOTE(review): @vsi_id indexes pf->vsi[] with no range check here --
 * confirm callers validate it (e.g. via i40e_vc_isvalid_vsi_id) first.
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;

	/* queue ids are zero-based, so qid must be strictly less */
	return qid < pf->vsi[vsi_id]->num_queue_pairs;
}
78 | ||
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id, bounded by the number of MSI-X
 * vectors the HW advertises per VF
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
92 | ||
93 | /***********************vf resource mgmt routines*****************/ | |
94 | ||
/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * Translate a VSI-relative queue id into the PF-absolute queue id,
 * honoring whether the VSI's queues were mapped contiguously or not.
 * Returns I40E_QUEUE_END_OF_LIST if the mapping cannot be resolved.
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		/* noncontiguous: each entry holds an explicit PF queue id */
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		/* contiguous: entry 0 is the base, the rest are offsets */
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
120 | ||
5c3c48ac JB |
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * Program the HW interrupt linked list for one VF vector from the
 * virtchnl map: the list head register (LNKLST0/LNKLSTN) points at the
 * first queue, and each queue's RQCTL/TQCTL register chains to the next
 * queue, terminated with I40E_QUEUE_END_OF_LIST.
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head: vector 0 is the mailbox/misc vector and has its
	 * own register; the rest index into the per-VF LNKLSTN array
	 */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}

	/* fold the rx and tx queue bitmaps into one interleaved map:
	 * bit (2*q) = rx queue q, bit (2*q + 1) = tx queue q
	 */
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (1 <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
	}

	/* write the list head pointing at the first mapped queue */
	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	/* walk the map; each iteration programs the current queue's
	 * cause-control register with a link to the following queue
	 */
	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q <
		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			/* no more queues: terminate the linked list */
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
220 | ||
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info from the VF (ring address/length)
 *
 * Configure a tx queue: build the HMC tx queue context from the
 * VF-supplied ring parameters, program it, and associate the queue with
 * the VF's PCI function. Returns 0 on success, -ENOENT on HMC failure.
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;	/* base is in 128B units */
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	/* head writeback lands just past the descriptor ring */
	tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = info->dma_ring_addr +
			      (info->ring_len * sizeof(struct i40e_tx_desc));

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
288 | ||
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info from the VF (ring/buffer sizes, split mode)
 *
 * Configure an rx queue: validate the VF-supplied buffer and packet
 * sizes, build the HMC rx queue context and program it.
 * Returns 0 on success, -EINVAL on bad VF input, -ENOENT on HMC failure.
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;	/* base is in 128B units */
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation: VF input, must stay in bounds */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
381 | ||
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources: create the VSI, restore any port
 * VLAN, install the VF's MAC and broadcast filters, and apply the
 * configured tx rate limit. Returns 0 on success, -ENOENT if the VSI
 * could not be created.
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "VF %d assigned LAN VSI index %d, VSI id %d\n",
			 vf->vf_id, vsi->idx, vsi->id);
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
		/* filter add failures are logged but non-fatal */
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    vf->port_vlan_id, true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF MAC addr\n");
		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
				    true, false);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified; the HW takes the limit in
	 * 50 Mbps increments, hence the division
	 */
	if (vf->tx_rate) {
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
449 | ||
805bd5bd MW |
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings: program the VPLAN and VSILAN queue tables so the
 * VF's queue indices resolve to the PF-absolute queues backing them
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI: 7 registers, two 16-bit queue ids each */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;	/* odd queue in the high half */
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
499 | ||
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings: turn off the VPLAN mapping enable bit and point
 * every queue-table entry at the end-of-list sentinel
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
519 | ||
/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources: release the VF's VSI, mask and clear its
 * interrupts and linked lists, and reset the bookkeeping state
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers; vector 0 has its own
		 * register, the rest index into the per-VF DYN_CTLN array
		 */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						    + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}
574 | ||
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources: create the VF's LAN VSI and record the total
 * queue-pair count for runtime request validation. On failure all
 * partially-allocated resources are freed. Returns 0 or a negative errno.
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
608 | ||
fc18eaa0 MW |
/* PCI config offset of the VF device status register and the
 * transactions-pending bit within it
 */
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the vf structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear. Polls the VF's PCI device status
 * through the PF_PCI_CIAA/CIAD indirection for up to ~100us.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	/* CIAA addresses VFs by their absolute (PF-base-relative) id */
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
637 | ||
5c3c48ac JB |
/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf: trigger (or, for VFLR, observe) a VF software reset,
 * wait for the HW to report completion, then rebuild the VF's
 * resources and mappings and mark it active again
 **/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	int i;
	u32 reg;

	/* warn the VF */
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete (up to ~1ms)
	 */
	for (i = 0; i < 100; i++) {
		/* vf reset requires driver to first reset the
		 * vf & then poll the status register to make sure
		 * that the requested op was completed
		 * successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	/* tell the VF its reset has completed even on timeout; the VF
	 * driver polls RSTAT1 for this value
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* On initial reset, we won't have any queues */
	if (vf->lan_vsi_index == 0)
		goto complete_reset;

	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
complete_reset:
	/* reallocate vf resources to reset the VSI state */
	i40e_free_vf_res(vf);
	i40e_alloc_vf_res(vf);
	i40e_enable_vf_mappings(vf);
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
	i40e_flush(hw);
}
713 | ||
5c3c48ac JB |
/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs by walking all i40e VF PCI
 * devices and checking the ASSIGNED flag on those owned by this PF.
 * Returns true if at least one owned VF is assigned.
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
	struct pci_dev *pdev = pf->pdev;
	struct pci_dev *vfdev;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				       I40E_DEV_ID_VF,
				       vfdev);
	}

	return false;
}
c354229f GR |
742 | #ifdef CONFIG_PCI_IOV |
743 | ||
744 | /** | |
745 | * i40e_enable_pf_switch_lb | |
746 | * @pf: pointer to the pf structure | |
747 | * | |
748 | * enable switch loop back or die - no point in a return value | |
749 | **/ | |
750 | static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) | |
751 | { | |
752 | struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; | |
753 | struct i40e_vsi_context ctxt; | |
754 | int aq_ret; | |
755 | ||
756 | ctxt.seid = pf->main_vsi_seid; | |
757 | ctxt.pf_num = pf->hw.pf_id; | |
758 | ctxt.vf_num = 0; | |
759 | aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); | |
760 | if (aq_ret) { | |
761 | dev_info(&pf->pdev->dev, | |
762 | "%s couldn't get pf vsi config, err %d, aq_err %d\n", | |
763 | __func__, aq_ret, pf->hw.aq.asq_last_status); | |
764 | return; | |
765 | } | |
766 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; | |
767 | ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | |
768 | ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | |
769 | ||
770 | aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); | |
771 | if (aq_ret) { | |
772 | dev_info(&pf->pdev->dev, | |
773 | "%s: update vsi switch failed, aq_err=%d\n", | |
774 | __func__, vsi->back->hw.aq.asq_last_status); | |
775 | } | |
776 | } | |
777 | #endif | |
778 | ||
779 | /** | |
780 | * i40e_disable_pf_switch_lb | |
781 | * @pf: pointer to the pf structure | |
782 | * | |
783 | * disable switch loop back or die - no point in a return value | |
784 | **/ | |
785 | static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) | |
786 | { | |
787 | struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; | |
788 | struct i40e_vsi_context ctxt; | |
789 | int aq_ret; | |
790 | ||
791 | ctxt.seid = pf->main_vsi_seid; | |
792 | ctxt.pf_num = pf->hw.pf_id; | |
793 | ctxt.vf_num = 0; | |
794 | aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); | |
795 | if (aq_ret) { | |
796 | dev_info(&pf->pdev->dev, | |
797 | "%s couldn't get pf vsi config, err %d, aq_err %d\n", | |
798 | __func__, aq_ret, pf->hw.aq.asq_last_status); | |
799 | return; | |
800 | } | |
801 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; | |
802 | ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | |
803 | ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | |
804 | ||
805 | aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); | |
806 | if (aq_ret) { | |
807 | dev_info(&pf->pdev->dev, | |
808 | "%s: update vsi switch failed, aq_err=%d\n", | |
809 | __func__, vsi->back->hw.aq.asq_last_status); | |
810 | } | |
811 | } | |
5c3c48ac JB |
812 | |
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources: tear down every VF's resources and mappings,
 * release the VF array, and disable SR-IOV unless a VF is still
 * assigned to a VM (in which case SR-IOV must stay enabled)
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	mdelay(10); /* let any messages in transit get finished up */
	/* free up vf resources; zero num_alloc_vfs first so concurrent
	 * paths stop iterating the array we are about to free
	 */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	if (!i40e_vfs_are_assigned(pf)) {
		pci_disable_sriov(pf->pdev);
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
		}
		i40e_disable_pf_switch_lb(pf);
	} else {
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");
	}

	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
}
864 | ||
865 | #ifdef CONFIG_PCI_IOV | |
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources: enable SR-IOV if needed, allocate and
 * initialize the per-VF state array, and reset each VF so its HW
 * resources get allocated. Returns 0 or a negative errno.
 *
 * NOTE(review): if kcalloc fails after pci_enable_sriov succeeded,
 * the err_alloc path calls i40e_free_vfs which returns early because
 * pf->vf is still NULL, so SR-IOV appears to stay enabled -- confirm.
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"Failed to enable SR-IOV, error %d.\n", ret);
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		/* vf resources get allocated during reset */
		i40e_reset_vf(&vfs[i], false);

		/* enable vf vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

	i40e_enable_pf_switch_lb(pf);
err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}
924 | ||
925 | #endif | |
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable SR-IOV with @num_vfs VFs, or change the VF count.  If the
 * requested count is already active this is a no-op; otherwise any
 * existing VFs are freed first.  The request is capped by the number
 * of VFs the PF was configured to support (pf->num_req_vfs).
 *
 * Returns the number of VFs enabled, or a negative errno on failure.
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int existing_vfs = pci_num_vf(pdev);
	int err;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);

	if (existing_vfs) {
		/* nothing to do when the count is unchanged */
		if (existing_vfs == num_vfs)
			return num_vfs;
		i40e_free_vfs(pf);
	}

	if (num_vfs > pf->num_req_vfs)
		return -EPERM;

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	return num_vfs;
#endif
	return 0;
}
965 | ||
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * sysfs sriov_numvfs entry point: a zero request disables SR-IOV and
 * frees all VF state, any other value enables/resizes the VF set.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (!num_vfs) {
		i40e_free_vfs(pf);
		return 0;
	}

	return i40e_pci_sriov_enable(pdev, num_vfs);
}
984 | ||
985 | /***********************virtual channel routines******************/ | |
986 | ||
987 | /** | |
988 | * i40e_vc_send_msg_to_vf | |
989 | * @vf: pointer to the vf info | |
990 | * @v_opcode: virtual channel opcode | |
991 | * @v_retval: virtual channel return value | |
992 | * @msg: pointer to the msg buffer | |
993 | * @msglen: msg length | |
994 | * | |
995 | * send msg to vf | |
996 | **/ | |
997 | static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, | |
998 | u32 v_retval, u8 *msg, u16 msglen) | |
999 | { | |
1000 | struct i40e_pf *pf = vf->pf; | |
1001 | struct i40e_hw *hw = &pf->hw; | |
7efa84b7 | 1002 | int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id; |
5c3c48ac JB |
1003 | i40e_status aq_ret; |
1004 | ||
1005 | /* single place to detect unsuccessful return values */ | |
1006 | if (v_retval) { | |
1007 | vf->num_invalid_msgs++; | |
1008 | dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", | |
1009 | v_opcode, v_retval); | |
1010 | if (vf->num_invalid_msgs > | |
1011 | I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { | |
1012 | dev_err(&pf->pdev->dev, | |
1013 | "Number of invalid messages exceeded for VF %d\n", | |
1014 | vf->vf_id); | |
1015 | dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); | |
1016 | set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); | |
1017 | } | |
1018 | } else { | |
1019 | vf->num_valid_msgs++; | |
1020 | } | |
1021 | ||
7efa84b7 MW |
1022 | aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval, |
1023 | msg, msglen, NULL); | |
5c3c48ac JB |
1024 | if (aq_ret) { |
1025 | dev_err(&pf->pdev->dev, | |
1026 | "Unable to send the message to VF %d aq_err %d\n", | |
1027 | vf->vf_id, pf->hw.aq.asq_last_status); | |
1028 | return -EIO; | |
1029 | } | |
1030 | ||
1031 | return 0; | |
1032 | } | |
1033 | ||
1034 | /** | |
1035 | * i40e_vc_send_resp_to_vf | |
1036 | * @vf: pointer to the vf info | |
1037 | * @opcode: operation code | |
1038 | * @retval: return value | |
1039 | * | |
1040 | * send resp msg to vf | |
1041 | **/ | |
1042 | static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, | |
1043 | enum i40e_virtchnl_ops opcode, | |
1044 | i40e_status retval) | |
1045 | { | |
1046 | return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); | |
1047 | } | |
1048 | ||
1049 | /** | |
1050 | * i40e_vc_get_version_msg | |
1051 | * @vf: pointer to the vf info | |
1052 | * | |
1053 | * called from the vf to request the API version used by the PF | |
1054 | **/ | |
1055 | static int i40e_vc_get_version_msg(struct i40e_vf *vf) | |
1056 | { | |
1057 | struct i40e_virtchnl_version_info info = { | |
1058 | I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR | |
1059 | }; | |
1060 | ||
1061 | return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, | |
1062 | I40E_SUCCESS, (u8 *)&info, | |
1063 | sizeof(struct | |
1064 | i40e_virtchnl_version_info)); | |
1065 | } | |
1066 | ||
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * Handle I40E_VIRTCHNL_OP_GET_VF_RESOURCES: describe the VF's VSI(s),
 * queue counts, MSI-X vector budget and offload capabilities, then mark
 * the VF active.  The VF must have completed init (I40E_VF_STAT_INIT)
 * before it may ask for resources.
 *
 * Returns the result of sending the response message to the VF.
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	/* only the single LAN VSI is reported today */
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* resource struct plus one trailing vsi_res entry per VSI */
	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		/* len = 0 so the error response carries no payload */
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	/* VLAN offload is only offered when no port VLAN is enforced */
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	/* index 0 is never a VF LAN VSI, so a zero index means "not assigned" */
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
		    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	/* kfree(NULL) is a no-op on the early-error path */
	kfree(vfres);
	return ret;
}
1127 | ||
1128 | /** | |
1129 | * i40e_vc_reset_vf_msg | |
1130 | * @vf: pointer to the vf info | |
1131 | * @msg: pointer to the msg buffer | |
1132 | * @msglen: msg length | |
1133 | * | |
1134 | * called from the vf to reset itself, | |
1135 | * unlike other virtchnl messages, pf driver | |
1136 | * doesn't send the response back to the vf | |
1137 | **/ | |
fc18eaa0 | 1138 | static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) |
5c3c48ac | 1139 | { |
fc18eaa0 MW |
1140 | if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) |
1141 | i40e_reset_vf(vf, false); | |
5c3c48ac JB |
1142 | } |
1143 | ||
1144 | /** | |
1145 | * i40e_vc_config_promiscuous_mode_msg | |
1146 | * @vf: pointer to the vf info | |
1147 | * @msg: pointer to the msg buffer | |
1148 | * @msglen: msg length | |
1149 | * | |
1150 | * called from the vf to configure the promiscuous mode of | |
1151 | * vf vsis | |
1152 | **/ | |
1153 | static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, | |
1154 | u8 *msg, u16 msglen) | |
1155 | { | |
1156 | struct i40e_virtchnl_promisc_info *info = | |
1157 | (struct i40e_virtchnl_promisc_info *)msg; | |
1158 | struct i40e_pf *pf = vf->pf; | |
1159 | struct i40e_hw *hw = &pf->hw; | |
1160 | bool allmulti = false; | |
1161 | bool promisc = false; | |
1162 | i40e_status aq_ret; | |
1163 | ||
1164 | if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || | |
1165 | !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || | |
1166 | !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || | |
1167 | (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) { | |
1168 | aq_ret = I40E_ERR_PARAM; | |
1169 | goto error_param; | |
1170 | } | |
1171 | ||
1172 | if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) | |
1173 | promisc = true; | |
1174 | aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id, | |
1175 | promisc, NULL); | |
1176 | if (aq_ret) | |
1177 | goto error_param; | |
1178 | ||
1179 | if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) | |
1180 | allmulti = true; | |
1181 | aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id, | |
1182 | allmulti, NULL); | |
1183 | ||
1184 | error_param: | |
1185 | /* send the response to the vf */ | |
1186 | return i40e_vc_send_resp_to_vf(vf, | |
1187 | I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, | |
1188 | aq_ret); | |
1189 | } | |
1190 | ||
1191 | /** | |
1192 | * i40e_vc_config_queues_msg | |
1193 | * @vf: pointer to the vf info | |
1194 | * @msg: pointer to the msg buffer | |
1195 | * @msglen: msg length | |
1196 | * | |
1197 | * called from the vf to configure the rx/tx | |
1198 | * queues | |
1199 | **/ | |
1200 | static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) | |
1201 | { | |
1202 | struct i40e_virtchnl_vsi_queue_config_info *qci = | |
1203 | (struct i40e_virtchnl_vsi_queue_config_info *)msg; | |
1204 | struct i40e_virtchnl_queue_pair_info *qpi; | |
1205 | u16 vsi_id, vsi_queue_id; | |
1206 | i40e_status aq_ret = 0; | |
1207 | int i; | |
1208 | ||
1209 | if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { | |
1210 | aq_ret = I40E_ERR_PARAM; | |
1211 | goto error_param; | |
1212 | } | |
1213 | ||
1214 | vsi_id = qci->vsi_id; | |
1215 | if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { | |
1216 | aq_ret = I40E_ERR_PARAM; | |
1217 | goto error_param; | |
1218 | } | |
1219 | for (i = 0; i < qci->num_queue_pairs; i++) { | |
1220 | qpi = &qci->qpair[i]; | |
1221 | vsi_queue_id = qpi->txq.queue_id; | |
1222 | if ((qpi->txq.vsi_id != vsi_id) || | |
1223 | (qpi->rxq.vsi_id != vsi_id) || | |
1224 | (qpi->rxq.queue_id != vsi_queue_id) || | |
1225 | !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { | |
1226 | aq_ret = I40E_ERR_PARAM; | |
1227 | goto error_param; | |
1228 | } | |
1229 | ||
1230 | if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, | |
1231 | &qpi->rxq) || | |
1232 | i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, | |
1233 | &qpi->txq)) { | |
1234 | aq_ret = I40E_ERR_PARAM; | |
1235 | goto error_param; | |
1236 | } | |
1237 | } | |
1238 | ||
1239 | error_param: | |
1240 | /* send the response to the vf */ | |
1241 | return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | |
1242 | aq_ret); | |
1243 | } | |
1244 | ||
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Handle I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: validate each requested
 * vector->queue mapping and program the HW interrupt linked lists.
 * Every queue bit set in a vector's rx/tx maps must name a queue the
 * VF actually owns, otherwise the whole request is rejected.
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	/* scratch copy of each queue bitmap, as for_each_set_bit wants
	 * an unsigned long *
	 */
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		/* same check for the tx half of the map */
		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		/* both maps validated; program the HW linked list */
		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
1307 | ||
1308 | /** | |
1309 | * i40e_vc_enable_queues_msg | |
1310 | * @vf: pointer to the vf info | |
1311 | * @msg: pointer to the msg buffer | |
1312 | * @msglen: msg length | |
1313 | * | |
1314 | * called from the vf to enable all or specific queue(s) | |
1315 | **/ | |
1316 | static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) | |
1317 | { | |
1318 | struct i40e_virtchnl_queue_select *vqs = | |
1319 | (struct i40e_virtchnl_queue_select *)msg; | |
1320 | struct i40e_pf *pf = vf->pf; | |
1321 | u16 vsi_id = vqs->vsi_id; | |
1322 | i40e_status aq_ret = 0; | |
5c3c48ac JB |
1323 | |
1324 | if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { | |
1325 | aq_ret = I40E_ERR_PARAM; | |
1326 | goto error_param; | |
1327 | } | |
1328 | ||
1329 | if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { | |
1330 | aq_ret = I40E_ERR_PARAM; | |
1331 | goto error_param; | |
1332 | } | |
1333 | ||
1334 | if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { | |
1335 | aq_ret = I40E_ERR_PARAM; | |
1336 | goto error_param; | |
1337 | } | |
88f6563d MW |
1338 | if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) |
1339 | aq_ret = I40E_ERR_TIMEOUT; | |
5c3c48ac JB |
1340 | error_param: |
1341 | /* send the response to the vf */ | |
1342 | return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, | |
1343 | aq_ret); | |
1344 | } | |
1345 | ||
1346 | /** | |
1347 | * i40e_vc_disable_queues_msg | |
1348 | * @vf: pointer to the vf info | |
1349 | * @msg: pointer to the msg buffer | |
1350 | * @msglen: msg length | |
1351 | * | |
1352 | * called from the vf to disable all or specific | |
1353 | * queue(s) | |
1354 | **/ | |
1355 | static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) | |
1356 | { | |
1357 | struct i40e_virtchnl_queue_select *vqs = | |
1358 | (struct i40e_virtchnl_queue_select *)msg; | |
1359 | struct i40e_pf *pf = vf->pf; | |
1360 | u16 vsi_id = vqs->vsi_id; | |
1361 | i40e_status aq_ret = 0; | |
5c3c48ac JB |
1362 | |
1363 | if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { | |
1364 | aq_ret = I40E_ERR_PARAM; | |
1365 | goto error_param; | |
1366 | } | |
1367 | ||
1368 | if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { | |
1369 | aq_ret = I40E_ERR_PARAM; | |
1370 | goto error_param; | |
1371 | } | |
1372 | ||
1373 | if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { | |
1374 | aq_ret = I40E_ERR_PARAM; | |
1375 | goto error_param; | |
1376 | } | |
88f6563d MW |
1377 | if (i40e_vsi_control_rings(pf->vsi[vsi_id], false)) |
1378 | aq_ret = I40E_ERR_TIMEOUT; | |
5c3c48ac JB |
1379 | |
1380 | error_param: | |
1381 | /* send the response to the vf */ | |
1382 | return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, | |
1383 | aq_ret); | |
1384 | } | |
1385 | ||
1386 | /** | |
1387 | * i40e_vc_get_stats_msg | |
1388 | * @vf: pointer to the vf info | |
1389 | * @msg: pointer to the msg buffer | |
1390 | * @msglen: msg length | |
1391 | * | |
1392 | * called from the vf to get vsi stats | |
1393 | **/ | |
1394 | static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) | |
1395 | { | |
1396 | struct i40e_virtchnl_queue_select *vqs = | |
1397 | (struct i40e_virtchnl_queue_select *)msg; | |
1398 | struct i40e_pf *pf = vf->pf; | |
1399 | struct i40e_eth_stats stats; | |
1400 | i40e_status aq_ret = 0; | |
1401 | struct i40e_vsi *vsi; | |
1402 | ||
1403 | memset(&stats, 0, sizeof(struct i40e_eth_stats)); | |
1404 | ||
1405 | if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { | |
1406 | aq_ret = I40E_ERR_PARAM; | |
1407 | goto error_param; | |
1408 | } | |
1409 | ||
1410 | if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { | |
1411 | aq_ret = I40E_ERR_PARAM; | |
1412 | goto error_param; | |
1413 | } | |
1414 | ||
1415 | vsi = pf->vsi[vqs->vsi_id]; | |
1416 | if (!vsi) { | |
1417 | aq_ret = I40E_ERR_PARAM; | |
1418 | goto error_param; | |
1419 | } | |
1420 | i40e_update_eth_stats(vsi); | |
5a9769c8 | 1421 | stats = vsi->eth_stats; |
5c3c48ac JB |
1422 | |
1423 | error_param: | |
1424 | /* send the response back to the vf */ | |
1425 | return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, | |
1426 | (u8 *)&stats, sizeof(stats)); | |
1427 | } | |
1428 | ||
f657a6e1 GR |
1429 | /** |
1430 | * i40e_check_vf_permission | |
1431 | * @vf: pointer to the vf info | |
1432 | * @macaddr: pointer to the MAC Address being checked | |
1433 | * | |
1434 | * Check if the VF has permission to add or delete unicast MAC address | |
1435 | * filters and return error code -EPERM if not. Then check if the | |
1436 | * address filter requested is broadcast or zero and if so return | |
1437 | * an invalid MAC address error code. | |
1438 | **/ | |
1439 | static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) | |
1440 | { | |
1441 | struct i40e_pf *pf = vf->pf; | |
1442 | int ret = 0; | |
1443 | ||
1444 | if (is_broadcast_ether_addr(macaddr) || | |
1445 | is_zero_ether_addr(macaddr)) { | |
1446 | dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); | |
1447 | ret = I40E_ERR_INVALID_MAC_ADDR; | |
5017c2a8 GR |
1448 | } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && |
1449 | !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { | |
f657a6e1 GR |
1450 | /* If the host VMM administrator has set the VF MAC address |
1451 | * administratively via the ndo_set_vf_mac command then deny | |
1452 | * permission to the VF to add or delete unicast MAC addresses. | |
5017c2a8 GR |
1453 | * The VF may request to set the MAC address filter already |
1454 | * assigned to it so do not return an error in that case. | |
f657a6e1 GR |
1455 | */ |
1456 | dev_err(&pf->pdev->dev, | |
1457 | "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); | |
1458 | ret = -EPERM; | |
1459 | } | |
1460 | return ret; | |
1461 | } | |
1462 | ||
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Handle I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: validate every requested
 * address first (permission + sanity), then add the corresponding MAC
 * filters to the VF's VSI and push the updated filter list to HW.
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* validate the whole list up front so nothing is added on failure */
	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		/* skip addresses that already have a filter */
		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			/* on a VLAN-enabled VSI the filter must be
			 * replicated for each active VLAN
			 */
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       ret);
}
1526 | ||
1527 | /** | |
1528 | * i40e_vc_del_mac_addr_msg | |
1529 | * @vf: pointer to the vf info | |
1530 | * @msg: pointer to the msg buffer | |
1531 | * @msglen: msg length | |
1532 | * | |
1533 | * remove guest mac address filter | |
1534 | **/ | |
1535 | static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) | |
1536 | { | |
1537 | struct i40e_virtchnl_ether_addr_list *al = | |
1538 | (struct i40e_virtchnl_ether_addr_list *)msg; | |
1539 | struct i40e_pf *pf = vf->pf; | |
1540 | struct i40e_vsi *vsi = NULL; | |
1541 | u16 vsi_id = al->vsi_id; | |
f657a6e1 | 1542 | i40e_status ret = 0; |
5c3c48ac JB |
1543 | int i; |
1544 | ||
1545 | if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || | |
1546 | !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || | |
1547 | !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { | |
f657a6e1 | 1548 | ret = I40E_ERR_PARAM; |
5c3c48ac JB |
1549 | goto error_param; |
1550 | } | |
f657a6e1 GR |
1551 | |
1552 | for (i = 0; i < al->num_elements; i++) { | |
700bbf6c MW |
1553 | if (is_broadcast_ether_addr(al->list[i].addr) || |
1554 | is_zero_ether_addr(al->list[i].addr)) { | |
1555 | dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", | |
1556 | al->list[i].addr); | |
1557 | ret = I40E_ERR_INVALID_MAC_ADDR; | |
f657a6e1 | 1558 | goto error_param; |
700bbf6c | 1559 | } |
f657a6e1 | 1560 | } |
5c3c48ac JB |
1561 | vsi = pf->vsi[vsi_id]; |
1562 | ||
1563 | /* delete addresses from the list */ | |
1564 | for (i = 0; i < al->num_elements; i++) | |
1565 | i40e_del_filter(vsi, al->list[i].addr, | |
1566 | I40E_VLAN_ANY, true, false); | |
1567 | ||
1568 | /* program the updated filter list */ | |
1569 | if (i40e_sync_vsi_filters(vsi)) | |
1570 | dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); | |
1571 | ||
1572 | error_param: | |
1573 | /* send the response to the vf */ | |
1574 | return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, | |
f657a6e1 | 1575 | ret); |
5c3c48ac JB |
1576 | } |
1577 | ||
1578 | /** | |
1579 | * i40e_vc_add_vlan_msg | |
1580 | * @vf: pointer to the vf info | |
1581 | * @msg: pointer to the msg buffer | |
1582 | * @msglen: msg length | |
1583 | * | |
1584 | * program guest vlan id | |
1585 | **/ | |
1586 | static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) | |
1587 | { | |
1588 | struct i40e_virtchnl_vlan_filter_list *vfl = | |
1589 | (struct i40e_virtchnl_vlan_filter_list *)msg; | |
1590 | struct i40e_pf *pf = vf->pf; | |
1591 | struct i40e_vsi *vsi = NULL; | |
1592 | u16 vsi_id = vfl->vsi_id; | |
1593 | i40e_status aq_ret = 0; | |
1594 | int i; | |
1595 | ||
1596 | if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || | |
1597 | !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || | |
1598 | !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { | |
1599 | aq_ret = I40E_ERR_PARAM; | |
1600 | goto error_param; | |
1601 | } | |
1602 | ||
1603 | for (i = 0; i < vfl->num_elements; i++) { | |
1604 | if (vfl->vlan_id[i] > I40E_MAX_VLANID) { | |
1605 | aq_ret = I40E_ERR_PARAM; | |
1606 | dev_err(&pf->pdev->dev, | |
1607 | "invalid VF VLAN id %d\n", vfl->vlan_id[i]); | |
1608 | goto error_param; | |
1609 | } | |
1610 | } | |
1611 | vsi = pf->vsi[vsi_id]; | |
1612 | if (vsi->info.pvid) { | |
1613 | aq_ret = I40E_ERR_PARAM; | |
1614 | goto error_param; | |
1615 | } | |
1616 | ||
1617 | i40e_vlan_stripping_enable(vsi); | |
1618 | for (i = 0; i < vfl->num_elements; i++) { | |
1619 | /* add new VLAN filter */ | |
1620 | int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); | |
1621 | if (ret) | |
1622 | dev_err(&pf->pdev->dev, | |
1623 | "Unable to add VF vlan filter %d, error %d\n", | |
1624 | vfl->vlan_id[i], ret); | |
1625 | } | |
1626 | ||
1627 | error_param: | |
1628 | /* send the response to the vf */ | |
1629 | return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); | |
1630 | } | |
1631 | ||
1632 | /** | |
1633 | * i40e_vc_remove_vlan_msg | |
1634 | * @vf: pointer to the vf info | |
1635 | * @msg: pointer to the msg buffer | |
1636 | * @msglen: msg length | |
1637 | * | |
1638 | * remove programmed guest vlan id | |
1639 | **/ | |
1640 | static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) | |
1641 | { | |
1642 | struct i40e_virtchnl_vlan_filter_list *vfl = | |
1643 | (struct i40e_virtchnl_vlan_filter_list *)msg; | |
1644 | struct i40e_pf *pf = vf->pf; | |
1645 | struct i40e_vsi *vsi = NULL; | |
1646 | u16 vsi_id = vfl->vsi_id; | |
1647 | i40e_status aq_ret = 0; | |
1648 | int i; | |
1649 | ||
1650 | if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || | |
1651 | !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || | |
1652 | !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { | |
1653 | aq_ret = I40E_ERR_PARAM; | |
1654 | goto error_param; | |
1655 | } | |
1656 | ||
1657 | for (i = 0; i < vfl->num_elements; i++) { | |
1658 | if (vfl->vlan_id[i] > I40E_MAX_VLANID) { | |
1659 | aq_ret = I40E_ERR_PARAM; | |
1660 | goto error_param; | |
1661 | } | |
1662 | } | |
1663 | ||
1664 | vsi = pf->vsi[vsi_id]; | |
1665 | if (vsi->info.pvid) { | |
1666 | aq_ret = I40E_ERR_PARAM; | |
1667 | goto error_param; | |
1668 | } | |
1669 | ||
1670 | for (i = 0; i < vfl->num_elements; i++) { | |
1671 | int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); | |
1672 | if (ret) | |
1673 | dev_err(&pf->pdev->dev, | |
1674 | "Unable to delete VF vlan filter %d, error %d\n", | |
1675 | vfl->vlan_id[i], ret); | |
1676 | } | |
1677 | ||
1678 | error_param: | |
1679 | /* send the response to the vf */ | |
1680 | return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); | |
1681 | } | |
1682 | ||
5c3c48ac JB |
1683 | /** |
1684 | * i40e_vc_validate_vf_msg | |
1685 | * @vf: pointer to the vf info | |
1686 | * @msg: pointer to the msg buffer | |
1687 | * @msglen: msg length | |
1688 | * @msghndl: msg handle | |
1689 | * | |
1690 | * validate msg | |
1691 | **/ | |
1692 | static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, | |
1693 | u32 v_retval, u8 *msg, u16 msglen) | |
1694 | { | |
1695 | bool err_msg_format = false; | |
1696 | int valid_len; | |
1697 | ||
1698 | /* Check if VF is disabled. */ | |
1699 | if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) | |
1700 | return I40E_ERR_PARAM; | |
1701 | ||
1702 | /* Validate message length. */ | |
1703 | switch (v_opcode) { | |
1704 | case I40E_VIRTCHNL_OP_VERSION: | |
1705 | valid_len = sizeof(struct i40e_virtchnl_version_info); | |
1706 | break; | |
1707 | case I40E_VIRTCHNL_OP_RESET_VF: | |
1708 | case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: | |
1709 | valid_len = 0; | |
1710 | break; | |
1711 | case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: | |
1712 | valid_len = sizeof(struct i40e_virtchnl_txq_info); | |
1713 | break; | |
1714 | case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: | |
1715 | valid_len = sizeof(struct i40e_virtchnl_rxq_info); | |
1716 | break; | |
1717 | case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: | |
1718 | valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); | |
1719 | if (msglen >= valid_len) { | |
1720 | struct i40e_virtchnl_vsi_queue_config_info *vqc = | |
1721 | (struct i40e_virtchnl_vsi_queue_config_info *)msg; | |
1722 | valid_len += (vqc->num_queue_pairs * | |
1723 | sizeof(struct | |
1724 | i40e_virtchnl_queue_pair_info)); | |
1725 | if (vqc->num_queue_pairs == 0) | |
1726 | err_msg_format = true; | |
1727 | } | |
1728 | break; | |
1729 | case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: | |
1730 | valid_len = sizeof(struct i40e_virtchnl_irq_map_info); | |
1731 | if (msglen >= valid_len) { | |
1732 | struct i40e_virtchnl_irq_map_info *vimi = | |
1733 | (struct i40e_virtchnl_irq_map_info *)msg; | |
1734 | valid_len += (vimi->num_vectors * | |
1735 | sizeof(struct i40e_virtchnl_vector_map)); | |
1736 | if (vimi->num_vectors == 0) | |
1737 | err_msg_format = true; | |
1738 | } | |
1739 | break; | |
1740 | case I40E_VIRTCHNL_OP_ENABLE_QUEUES: | |
1741 | case I40E_VIRTCHNL_OP_DISABLE_QUEUES: | |
1742 | valid_len = sizeof(struct i40e_virtchnl_queue_select); | |
1743 | break; | |
1744 | case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: | |
1745 | case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: | |
1746 | valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); | |
1747 | if (msglen >= valid_len) { | |
1748 | struct i40e_virtchnl_ether_addr_list *veal = | |
1749 | (struct i40e_virtchnl_ether_addr_list *)msg; | |
1750 | valid_len += veal->num_elements * | |
1751 | sizeof(struct i40e_virtchnl_ether_addr); | |
1752 | if (veal->num_elements == 0) | |
1753 | err_msg_format = true; | |
1754 | } | |
1755 | break; | |
1756 | case I40E_VIRTCHNL_OP_ADD_VLAN: | |
1757 | case I40E_VIRTCHNL_OP_DEL_VLAN: | |
1758 | valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); | |
1759 | if (msglen >= valid_len) { | |
1760 | struct i40e_virtchnl_vlan_filter_list *vfl = | |
1761 | (struct i40e_virtchnl_vlan_filter_list *)msg; | |
1762 | valid_len += vfl->num_elements * sizeof(u16); | |
1763 | if (vfl->num_elements == 0) | |
1764 | err_msg_format = true; | |
1765 | } | |
1766 | break; | |
1767 | case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: | |
1768 | valid_len = sizeof(struct i40e_virtchnl_promisc_info); | |
1769 | break; | |
1770 | case I40E_VIRTCHNL_OP_GET_STATS: | |
1771 | valid_len = sizeof(struct i40e_virtchnl_queue_select); | |
1772 | break; | |
1773 | /* These are always errors coming from the VF. */ | |
1774 | case I40E_VIRTCHNL_OP_EVENT: | |
1775 | case I40E_VIRTCHNL_OP_UNKNOWN: | |
1776 | default: | |
1777 | return -EPERM; | |
1778 | break; | |
1779 | } | |
1780 | /* few more checks */ | |
1781 | if ((valid_len != msglen) || (err_msg_format)) { | |
1782 | i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); | |
1783 | return -EINVAL; | |
1784 | } else { | |
1785 | return 0; | |
1786 | } | |
1787 | } | |
1788 | ||
1789 | /** | |
1790 | * i40e_vc_process_vf_msg | |
1791 | * @pf: pointer to the pf structure | |
1792 | * @vf_id: source vf id | |
1793 | * @msg: pointer to the msg buffer | |
1794 | * @msglen: msg length | |
1795 | * @msghndl: msg handle | |
1796 | * | |
1797 | * called from the common aeq/arq handler to | |
1798 | * process request from vf | |
1799 | **/ | |
1800 | int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, | |
1801 | u32 v_retval, u8 *msg, u16 msglen) | |
1802 | { | |
5c3c48ac | 1803 | struct i40e_hw *hw = &pf->hw; |
c243e963 | 1804 | unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; |
6c1b5bff | 1805 | struct i40e_vf *vf; |
5c3c48ac JB |
1806 | int ret; |
1807 | ||
1808 | pf->vf_aq_requests++; | |
7efa84b7 | 1809 | if (local_vf_id >= pf->num_alloc_vfs) |
6c1b5bff | 1810 | return -EINVAL; |
7efa84b7 | 1811 | vf = &(pf->vf[local_vf_id]); |
5c3c48ac JB |
1812 | /* perform basic checks on the msg */ |
1813 | ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); | |
1814 | ||
1815 | if (ret) { | |
499ec80f | 1816 | dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n", |
7efa84b7 | 1817 | local_vf_id, v_opcode, msglen); |
5c3c48ac JB |
1818 | return ret; |
1819 | } | |
bae3cae4 | 1820 | |
5c3c48ac JB |
1821 | switch (v_opcode) { |
1822 | case I40E_VIRTCHNL_OP_VERSION: | |
1823 | ret = i40e_vc_get_version_msg(vf); | |
1824 | break; | |
1825 | case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: | |
1826 | ret = i40e_vc_get_vf_resources_msg(vf); | |
1827 | break; | |
1828 | case I40E_VIRTCHNL_OP_RESET_VF: | |
fc18eaa0 MW |
1829 | i40e_vc_reset_vf_msg(vf); |
1830 | ret = 0; | |
5c3c48ac JB |
1831 | break; |
1832 | case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: | |
1833 | ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); | |
1834 | break; | |
1835 | case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: | |
1836 | ret = i40e_vc_config_queues_msg(vf, msg, msglen); | |
1837 | break; | |
1838 | case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: | |
1839 | ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); | |
1840 | break; | |
1841 | case I40E_VIRTCHNL_OP_ENABLE_QUEUES: | |
1842 | ret = i40e_vc_enable_queues_msg(vf, msg, msglen); | |
1843 | break; | |
1844 | case I40E_VIRTCHNL_OP_DISABLE_QUEUES: | |
1845 | ret = i40e_vc_disable_queues_msg(vf, msg, msglen); | |
1846 | break; | |
1847 | case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: | |
1848 | ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); | |
1849 | break; | |
1850 | case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: | |
1851 | ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); | |
1852 | break; | |
1853 | case I40E_VIRTCHNL_OP_ADD_VLAN: | |
1854 | ret = i40e_vc_add_vlan_msg(vf, msg, msglen); | |
1855 | break; | |
1856 | case I40E_VIRTCHNL_OP_DEL_VLAN: | |
1857 | ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); | |
1858 | break; | |
1859 | case I40E_VIRTCHNL_OP_GET_STATS: | |
1860 | ret = i40e_vc_get_stats_msg(vf, msg, msglen); | |
1861 | break; | |
5c3c48ac JB |
1862 | case I40E_VIRTCHNL_OP_UNKNOWN: |
1863 | default: | |
7efa84b7 MW |
1864 | dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n", |
1865 | v_opcode, local_vf_id); | |
5c3c48ac JB |
1866 | ret = i40e_vc_send_resp_to_vf(vf, v_opcode, |
1867 | I40E_ERR_NOT_IMPLEMENTED); | |
1868 | break; | |
1869 | } | |
1870 | ||
1871 | return ret; | |
1872 | } | |
1873 | ||
1874 | /** | |
1875 | * i40e_vc_process_vflr_event | |
1876 | * @pf: pointer to the pf structure | |
1877 | * | |
1878 | * called from the vlfr irq handler to | |
1879 | * free up vf resources and state variables | |
1880 | **/ | |
1881 | int i40e_vc_process_vflr_event(struct i40e_pf *pf) | |
1882 | { | |
1883 | u32 reg, reg_idx, bit_idx, vf_id; | |
1884 | struct i40e_hw *hw = &pf->hw; | |
1885 | struct i40e_vf *vf; | |
1886 | ||
1887 | if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) | |
1888 | return 0; | |
1889 | ||
1890 | clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); | |
1891 | for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { | |
1892 | reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; | |
1893 | bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; | |
1894 | /* read GLGEN_VFLRSTAT register to find out the flr vfs */ | |
1895 | vf = &pf->vf[vf_id]; | |
1896 | reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); | |
1897 | if (reg & (1 << bit_idx)) { | |
1898 | /* clear the bit in GLGEN_VFLRSTAT */ | |
1899 | wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); | |
1900 | ||
eb2d80bc MW |
1901 | if (!test_bit(__I40E_DOWN, &pf->state)) |
1902 | i40e_reset_vf(vf, true); | |
5c3c48ac JB |
1903 | } |
1904 | } | |
1905 | ||
1906 | /* re-enable vflr interrupt cause */ | |
1907 | reg = rd32(hw, I40E_PFINT_ICR0_ENA); | |
1908 | reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; | |
1909 | wr32(hw, I40E_PFINT_ICR0_ENA, reg); | |
1910 | i40e_flush(hw); | |
1911 | ||
1912 | return 0; | |
1913 | } | |
1914 | ||
1915 | /** | |
1916 | * i40e_vc_vf_broadcast | |
1917 | * @pf: pointer to the pf structure | |
1918 | * @opcode: operation code | |
1919 | * @retval: return value | |
1920 | * @msg: pointer to the msg buffer | |
1921 | * @msglen: msg length | |
1922 | * | |
1923 | * send a message to all VFs on a given PF | |
1924 | **/ | |
1925 | static void i40e_vc_vf_broadcast(struct i40e_pf *pf, | |
1926 | enum i40e_virtchnl_ops v_opcode, | |
1927 | i40e_status v_retval, u8 *msg, | |
1928 | u16 msglen) | |
1929 | { | |
1930 | struct i40e_hw *hw = &pf->hw; | |
1931 | struct i40e_vf *vf = pf->vf; | |
1932 | int i; | |
1933 | ||
1934 | for (i = 0; i < pf->num_alloc_vfs; i++) { | |
1935 | /* Ignore return value on purpose - a given VF may fail, but | |
1936 | * we need to keep going and send to all of them | |
1937 | */ | |
1938 | i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, | |
1939 | msg, msglen, NULL); | |
1940 | vf++; | |
1941 | } | |
1942 | } | |
1943 | ||
1944 | /** | |
1945 | * i40e_vc_notify_link_state | |
1946 | * @pf: pointer to the pf structure | |
1947 | * | |
1948 | * send a link status message to all VFs on a given PF | |
1949 | **/ | |
1950 | void i40e_vc_notify_link_state(struct i40e_pf *pf) | |
1951 | { | |
1952 | struct i40e_virtchnl_pf_event pfe; | |
588aefa0 MW |
1953 | struct i40e_hw *hw = &pf->hw; |
1954 | struct i40e_vf *vf = pf->vf; | |
1955 | struct i40e_link_status *ls = &pf->hw.phy.link_info; | |
1956 | int i; | |
5c3c48ac JB |
1957 | |
1958 | pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; | |
1959 | pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; | |
588aefa0 MW |
1960 | for (i = 0; i < pf->num_alloc_vfs; i++) { |
1961 | if (vf->link_forced) { | |
1962 | pfe.event_data.link_event.link_status = vf->link_up; | |
1963 | pfe.event_data.link_event.link_speed = | |
1964 | (vf->link_up ? I40E_LINK_SPEED_40GB : 0); | |
1965 | } else { | |
1966 | pfe.event_data.link_event.link_status = | |
1967 | ls->link_info & I40E_AQ_LINK_UP; | |
1968 | pfe.event_data.link_event.link_speed = ls->link_speed; | |
1969 | } | |
1970 | i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, | |
1971 | 0, (u8 *)&pfe, sizeof(pfe), | |
1972 | NULL); | |
1973 | vf++; | |
1974 | } | |
5c3c48ac JB |
1975 | } |
1976 | ||
1977 | /** | |
1978 | * i40e_vc_notify_reset | |
1979 | * @pf: pointer to the pf structure | |
1980 | * | |
1981 | * indicate a pending reset to all VFs on a given PF | |
1982 | **/ | |
1983 | void i40e_vc_notify_reset(struct i40e_pf *pf) | |
1984 | { | |
1985 | struct i40e_virtchnl_pf_event pfe; | |
1986 | ||
1987 | pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; | |
1988 | pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; | |
1989 | i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, | |
1990 | (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); | |
1991 | } | |
1992 | ||
1993 | /** | |
1994 | * i40e_vc_notify_vf_reset | |
1995 | * @vf: pointer to the vf structure | |
1996 | * | |
1997 | * indicate a pending reset to the given VF | |
1998 | **/ | |
1999 | void i40e_vc_notify_vf_reset(struct i40e_vf *vf) | |
2000 | { | |
2001 | struct i40e_virtchnl_pf_event pfe; | |
2002 | ||
2003 | pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; | |
2004 | pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; | |
2005 | i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, | |
2006 | I40E_SUCCESS, (u8 *)&pfe, | |
2007 | sizeof(struct i40e_virtchnl_pf_event), NULL); | |
2008 | } | |
2009 | ||
2010 | /** | |
2011 | * i40e_ndo_set_vf_mac | |
2012 | * @netdev: network interface device structure | |
2013 | * @vf_id: vf identifier | |
2014 | * @mac: mac address | |
2015 | * | |
2016 | * program vf mac address | |
2017 | **/ | |
2018 | int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) | |
2019 | { | |
2020 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
2021 | struct i40e_vsi *vsi = np->vsi; | |
2022 | struct i40e_pf *pf = vsi->back; | |
2023 | struct i40e_mac_filter *f; | |
2024 | struct i40e_vf *vf; | |
2025 | int ret = 0; | |
2026 | ||
2027 | /* validate the request */ | |
2028 | if (vf_id >= pf->num_alloc_vfs) { | |
2029 | dev_err(&pf->pdev->dev, | |
2030 | "Invalid VF Identifier %d\n", vf_id); | |
2031 | ret = -EINVAL; | |
2032 | goto error_param; | |
2033 | } | |
2034 | ||
2035 | vf = &(pf->vf[vf_id]); | |
2036 | vsi = pf->vsi[vf->lan_vsi_index]; | |
2037 | if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { | |
2038 | dev_err(&pf->pdev->dev, | |
2039 | "Uninitialized VF %d\n", vf_id); | |
2040 | ret = -EINVAL; | |
2041 | goto error_param; | |
2042 | } | |
2043 | ||
2044 | if (!is_valid_ether_addr(mac)) { | |
2045 | dev_err(&pf->pdev->dev, | |
2046 | "Invalid VF ethernet address\n"); | |
2047 | ret = -EINVAL; | |
2048 | goto error_param; | |
2049 | } | |
2050 | ||
2051 | /* delete the temporary mac address */ | |
2052 | i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false); | |
2053 | ||
2054 | /* add the new mac address */ | |
2055 | f = i40e_add_filter(vsi, mac, 0, true, false); | |
2056 | if (!f) { | |
2057 | dev_err(&pf->pdev->dev, | |
2058 | "Unable to add VF ucast filter\n"); | |
2059 | ret = -ENOMEM; | |
2060 | goto error_param; | |
2061 | } | |
2062 | ||
2063 | dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); | |
2064 | /* program mac filter */ | |
2065 | if (i40e_sync_vsi_filters(vsi)) { | |
2066 | dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); | |
2067 | ret = -EIO; | |
2068 | goto error_param; | |
2069 | } | |
2070 | memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN); | |
f657a6e1 | 2071 | vf->pf_set_mac = true; |
5c3c48ac JB |
2072 | dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); |
2073 | ret = 0; | |
2074 | ||
2075 | error_param: | |
2076 | return ret; | |
2077 | } | |
2078 | ||
2079 | /** | |
2080 | * i40e_ndo_set_vf_port_vlan | |
2081 | * @netdev: network interface device structure | |
2082 | * @vf_id: vf identifier | |
2083 | * @vlan_id: mac address | |
2084 | * @qos: priority setting | |
2085 | * | |
2086 | * program vf vlan id and/or qos | |
2087 | **/ | |
2088 | int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, | |
2089 | int vf_id, u16 vlan_id, u8 qos) | |
2090 | { | |
2091 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
2092 | struct i40e_pf *pf = np->vsi->back; | |
2093 | struct i40e_vsi *vsi; | |
2094 | struct i40e_vf *vf; | |
2095 | int ret = 0; | |
2096 | ||
2097 | /* validate the request */ | |
2098 | if (vf_id >= pf->num_alloc_vfs) { | |
2099 | dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); | |
2100 | ret = -EINVAL; | |
2101 | goto error_pvid; | |
2102 | } | |
2103 | ||
2104 | if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { | |
2105 | dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); | |
2106 | ret = -EINVAL; | |
2107 | goto error_pvid; | |
2108 | } | |
2109 | ||
2110 | vf = &(pf->vf[vf_id]); | |
2111 | vsi = pf->vsi[vf->lan_vsi_index]; | |
2112 | if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { | |
2113 | dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); | |
2114 | ret = -EINVAL; | |
2115 | goto error_pvid; | |
2116 | } | |
2117 | ||
f9b4b627 | 2118 | if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { |
99a4973c GR |
2119 | dev_err(&pf->pdev->dev, |
2120 | "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", | |
2121 | vf_id); | |
f9b4b627 GR |
2122 | /* Administrator Error - knock the VF offline until he does |
2123 | * the right thing by reconfiguring his network correctly | |
2124 | * and then reloading the VF driver. | |
2125 | */ | |
2126 | i40e_vc_disable_vf(pf, vf); | |
2127 | } | |
99a4973c | 2128 | |
8d82a7c5 GR |
2129 | /* Check for condition where there was already a port VLAN ID |
2130 | * filter set and now it is being deleted by setting it to zero. | |
2131 | * Before deleting all the old VLAN filters we must add new ones | |
2132 | * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our | |
2133 | * MAC addresses deleted. | |
2134 | */ | |
2135 | if (!(vlan_id || qos) && vsi->info.pvid) | |
2136 | ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); | |
2137 | ||
5c3c48ac JB |
2138 | if (vsi->info.pvid) { |
2139 | /* kill old VLAN */ | |
2140 | ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & | |
2141 | VLAN_VID_MASK)); | |
2142 | if (ret) { | |
2143 | dev_info(&vsi->back->pdev->dev, | |
2144 | "remove VLAN failed, ret=%d, aq_err=%d\n", | |
2145 | ret, pf->hw.aq.asq_last_status); | |
2146 | } | |
2147 | } | |
2148 | if (vlan_id || qos) | |
2149 | ret = i40e_vsi_add_pvid(vsi, | |
2150 | vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)); | |
2151 | else | |
6c12fcbf | 2152 | i40e_vsi_remove_pvid(vsi); |
5c3c48ac JB |
2153 | |
2154 | if (vlan_id) { | |
2155 | dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", | |
2156 | vlan_id, qos, vf_id); | |
2157 | ||
2158 | /* add new VLAN filter */ | |
2159 | ret = i40e_vsi_add_vlan(vsi, vlan_id); | |
2160 | if (ret) { | |
2161 | dev_info(&vsi->back->pdev->dev, | |
2162 | "add VF VLAN failed, ret=%d aq_err=%d\n", ret, | |
2163 | vsi->back->hw.aq.asq_last_status); | |
2164 | goto error_pvid; | |
2165 | } | |
8d82a7c5 GR |
2166 | /* Kill non-vlan MAC filters - ignore error return since |
2167 | * there might not be any non-vlan MAC filters. | |
2168 | */ | |
2169 | i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); | |
5c3c48ac JB |
2170 | } |
2171 | ||
2172 | if (ret) { | |
2173 | dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); | |
2174 | goto error_pvid; | |
2175 | } | |
6c12fcbf GR |
2176 | /* The Port VLAN needs to be saved across resets the same as the |
2177 | * default LAN MAC address. | |
2178 | */ | |
2179 | vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); | |
5c3c48ac JB |
2180 | ret = 0; |
2181 | ||
2182 | error_pvid: | |
2183 | return ret; | |
2184 | } | |
2185 | ||
2186 | /** | |
2187 | * i40e_ndo_set_vf_bw | |
2188 | * @netdev: network interface device structure | |
2189 | * @vf_id: vf identifier | |
2190 | * @tx_rate: tx rate | |
2191 | * | |
2192 | * configure vf tx rate | |
2193 | **/ | |
2194 | int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate) | |
2195 | { | |
6b192891 MW |
2196 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
2197 | struct i40e_pf *pf = np->vsi->back; | |
2198 | struct i40e_vsi *vsi; | |
2199 | struct i40e_vf *vf; | |
2200 | int speed = 0; | |
2201 | int ret = 0; | |
2202 | ||
2203 | /* validate the request */ | |
2204 | if (vf_id >= pf->num_alloc_vfs) { | |
2205 | dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); | |
2206 | ret = -EINVAL; | |
2207 | goto error; | |
2208 | } | |
2209 | ||
2210 | vf = &(pf->vf[vf_id]); | |
2211 | vsi = pf->vsi[vf->lan_vsi_index]; | |
2212 | if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { | |
2213 | dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); | |
2214 | ret = -EINVAL; | |
2215 | goto error; | |
2216 | } | |
2217 | ||
2218 | switch (pf->hw.phy.link_info.link_speed) { | |
2219 | case I40E_LINK_SPEED_40GB: | |
2220 | speed = 40000; | |
2221 | break; | |
2222 | case I40E_LINK_SPEED_10GB: | |
2223 | speed = 10000; | |
2224 | break; | |
2225 | case I40E_LINK_SPEED_1GB: | |
2226 | speed = 1000; | |
2227 | break; | |
2228 | default: | |
2229 | break; | |
2230 | } | |
2231 | ||
2232 | if (tx_rate > speed) { | |
2233 | dev_err(&pf->pdev->dev, "Invalid tx rate %d specified for vf %d.", | |
2234 | tx_rate, vf->vf_id); | |
2235 | ret = -EINVAL; | |
2236 | goto error; | |
2237 | } | |
2238 | ||
2239 | /* Tx rate credits are in values of 50Mbps, 0 is disabled*/ | |
2240 | ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, tx_rate / 50, 0, | |
2241 | NULL); | |
2242 | if (ret) { | |
2243 | dev_err(&pf->pdev->dev, "Unable to set tx rate, error code %d.\n", | |
2244 | ret); | |
2245 | ret = -EIO; | |
2246 | goto error; | |
2247 | } | |
2248 | vf->tx_rate = tx_rate; | |
2249 | error: | |
2250 | return ret; | |
5c3c48ac JB |
2251 | } |
2252 | ||
2253 | /** | |
2254 | * i40e_ndo_get_vf_config | |
2255 | * @netdev: network interface device structure | |
2256 | * @vf_id: vf identifier | |
2257 | * @ivi: vf configuration structure | |
2258 | * | |
2259 | * return vf configuration | |
2260 | **/ | |
2261 | int i40e_ndo_get_vf_config(struct net_device *netdev, | |
2262 | int vf_id, struct ifla_vf_info *ivi) | |
2263 | { | |
2264 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
5c3c48ac JB |
2265 | struct i40e_vsi *vsi = np->vsi; |
2266 | struct i40e_pf *pf = vsi->back; | |
2267 | struct i40e_vf *vf; | |
2268 | int ret = 0; | |
2269 | ||
2270 | /* validate the request */ | |
2271 | if (vf_id >= pf->num_alloc_vfs) { | |
2272 | dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); | |
2273 | ret = -EINVAL; | |
2274 | goto error_param; | |
2275 | } | |
2276 | ||
2277 | vf = &(pf->vf[vf_id]); | |
2278 | /* first vsi is always the LAN vsi */ | |
2279 | vsi = pf->vsi[vf->lan_vsi_index]; | |
2280 | if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { | |
2281 | dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); | |
2282 | ret = -EINVAL; | |
2283 | goto error_param; | |
2284 | } | |
2285 | ||
2286 | ivi->vf = vf_id; | |
2287 | ||
f4a1c5cf | 2288 | memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); |
5c3c48ac | 2289 | |
6b192891 | 2290 | ivi->tx_rate = vf->tx_rate; |
5c3c48ac JB |
2291 | ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; |
2292 | ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> | |
2293 | I40E_VLAN_PRIORITY_SHIFT; | |
84ca55a0 MW |
2294 | if (vf->link_forced == false) |
2295 | ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; | |
2296 | else if (vf->link_up == true) | |
2297 | ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; | |
2298 | else | |
2299 | ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; | |
2300 | ||
5c3c48ac JB |
2301 | ret = 0; |
2302 | ||
2303 | error_param: | |
2304 | return ret; | |
2305 | } | |
588aefa0 MW |
2306 | |
2307 | /** | |
2308 | * i40e_ndo_set_vf_link_state | |
2309 | * @netdev: network interface device structure | |
2310 | * @vf_id: vf identifier | |
2311 | * @link: required link state | |
2312 | * | |
2313 | * Set the link state of a specified VF, regardless of physical link state | |
2314 | **/ | |
2315 | int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) | |
2316 | { | |
2317 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
2318 | struct i40e_pf *pf = np->vsi->back; | |
2319 | struct i40e_virtchnl_pf_event pfe; | |
2320 | struct i40e_hw *hw = &pf->hw; | |
2321 | struct i40e_vf *vf; | |
2322 | int ret = 0; | |
2323 | ||
2324 | /* validate the request */ | |
2325 | if (vf_id >= pf->num_alloc_vfs) { | |
2326 | dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); | |
2327 | ret = -EINVAL; | |
2328 | goto error_out; | |
2329 | } | |
2330 | ||
2331 | vf = &pf->vf[vf_id]; | |
2332 | ||
2333 | pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; | |
2334 | pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; | |
2335 | ||
2336 | switch (link) { | |
2337 | case IFLA_VF_LINK_STATE_AUTO: | |
2338 | vf->link_forced = false; | |
2339 | pfe.event_data.link_event.link_status = | |
2340 | pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; | |
2341 | pfe.event_data.link_event.link_speed = | |
2342 | pf->hw.phy.link_info.link_speed; | |
2343 | break; | |
2344 | case IFLA_VF_LINK_STATE_ENABLE: | |
2345 | vf->link_forced = true; | |
2346 | vf->link_up = true; | |
2347 | pfe.event_data.link_event.link_status = true; | |
2348 | pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; | |
2349 | break; | |
2350 | case IFLA_VF_LINK_STATE_DISABLE: | |
2351 | vf->link_forced = true; | |
2352 | vf->link_up = false; | |
2353 | pfe.event_data.link_event.link_status = false; | |
2354 | pfe.event_data.link_event.link_speed = 0; | |
2355 | break; | |
2356 | default: | |
2357 | ret = -EINVAL; | |
2358 | goto error_out; | |
2359 | } | |
2360 | /* Notify the VF of its new link state */ | |
2361 | i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, | |
2362 | 0, (u8 *)&pfe, sizeof(pfe), NULL); | |
2363 | ||
2364 | error_out: | |
2365 | return ret; | |
2366 | } |