// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))

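/* Note on the two helpers above (comment added for clarity): each invocation
 * packs its arguments into the shift/mask layout of a single GLFLXP register
 * and writes it with wr32(). For example,
 * ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0) programs flex
 * word 0 of the given RXDID profile with the ICE_RX_OPC_MDID opcode and the
 * hash-low metadata ID.
 */
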
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer, which should be interpreted as a
 * "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_aq_discover_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
static enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}

/**
 * ice_init_flex_flags
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize Rx flex flags
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
	 * flexiflags1[3:0] - Not used for flag programming
	 * flexiflags2[7:0] - Tunnel and VLAN types
	 * 2 invalid fields in last index
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_FIN, idx++);
		/* flex flag 1 is not used for flexi-flag programming, skipping
		 * these four FLG64 bits.
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
				   ICE_RXFLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
				   ICE_RXFLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_flex_flds
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	mutex_init(&sw->mac_list_lock);
	INIT_LIST_HEAD(&sw->mac_list_head);

	mutex_init(&sw->vlan_list_lock);
	INIT_LIST_HEAD(&sw->vlan_list_head);

	mutex_init(&sw->eth_m_list_lock);
	INIT_LIST_HEAD(&sw->eth_m_list_head);

	mutex_init(&sw->promisc_list_lock);
	INIT_LIST_HEAD(&sw->promisc_list_head);

	mutex_init(&sw->mac_vlan_list_lock);
	INIT_LIST_HEAD(&sw->mac_vlan_list_head);

	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}

	mutex_destroy(&sw->mac_list_lock);
	mutex_destroy(&sw->vlan_list_lock);
	mutex_destroy(&sw->eth_m_list_lock);
	mutex_destroy(&sw->promisc_list_lock);
	mutex_destroy(&sw->mac_vlan_list_lock);

	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	/* set these values to minimum allowed */
	hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
	hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
	hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
	hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_sched_cleanup_all(hw);
	ice_shutdown_all_ctrlq(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	ice_cleanup_fltr_mgmt_struct(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

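/* Worked example of the grst_delay math above (hypothetical register value):
 * if the GRSTDEL field of GLGEN_RSTCTL reads 5, ice_check_reset() polls for
 * up to (5 + 10) * 100 ms, i.e. the 500 ms reset delay plus the extra second
 * allowed for outstanding AQ commands.
 */
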
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

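/* Reading the table above (comment added for clarity): each
 * ICE_CTX_STORE(struct, field, width, lsb) entry tells ice_set_ctx() to copy
 * the `width` least significant bits of that field into the dense context
 * image starting at bit `lsb`. E.g. the qlen entry places a 13-bit queue
 * length at bit offset 89 of the buffer that ice_write_rxq_ctx() then writes
 * out dword by dword.
 */
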
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

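/* Typical acquire/use/release pattern for the resource helpers in this file
 * (a sketch modeled on the NVM access path; ICE_NVM_TIMEOUT is defined in
 * the NVM code and assumed here):
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *			     ICE_NVM_TIMEOUT)) {
 *		... access the shared resource, e.g. read NVM words ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */
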
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	*data_size = le16_to_cpu(desc.datalen);

	return status;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;
	u16 data_size = 0;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
	 * The driver then allocates the buffer of this size and retries the
	 * operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cbuf_len = ICE_GET_CAP_BUF_COUNT *
		sizeof(struct ice_aqc_list_caps_elem);

	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
					      ice_aqc_opc_list_func_caps, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
		cbuf_len = data_size;
	} while (--retries);

	return status;
}

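/* A concrete pass through the resize logic above (hypothetical sizes): if
 * the initial 40-element buffer is too small, the AQ returns
 * ICE_AQ_RC_ENOMEM and desc.datalen reports the size actually needed, say
 * 60 * sizeof(struct ice_aqc_list_caps_elem); the second (and final) retry
 * then allocates exactly that many bytes.
 */
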
/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = htons(*((u16 *)mac_addr));
	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
static enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_phy_cfg *cmd;
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	cmd = &desc.params.set_phy;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	cmd->lport_num = lport;

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
static enum ice_status
ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	phy_info = &pi->phy;
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		goto out;

	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
					     pcaps, NULL);
		if (status)
			goto out;

		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
		       sizeof(phy_info->link_info.module_type));
	}
out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @atomic_restart: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current phy config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
	/* set the new capabilities */
	cfg.caps |= pause_mask;
	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (atomic_restart)
			cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
		/* Copy over all the old settings */
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

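/* Sketch of the expected calling convention for ice_set_fc() (mirrors the
 * ethtool pause-parameter flow; error handling elided):
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 */
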
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the hw struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

1758/**
1759 * ice_aq_get_rss_lut
1760 * @hw: pointer to the hardware structure
1761 * @vsi_id: VSI FW index
1762 * @lut_type: LUT table type
1763 * @lut: pointer to the LUT buffer provided by the caller
1764 * @lut_size: size of the LUT buffer
1765 *
1766 * get the RSS lookup table, PF or VSI type
1767 */
1768enum ice_status
1769ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1770 u16 lut_size)
1771{
1772 return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
1773 false);
1774}
1775
1776/**
1777 * ice_aq_set_rss_lut
1778 * @hw: pointer to the hardware structure
1779 * @vsi_id: VSI FW index
1780 * @lut_type: LUT table type
1781 * @lut: pointer to the LUT buffer provided by the caller
1782 * @lut_size: size of the LUT buffer
1783 *
1784 * set the RSS lookup table, PF or VSI type
1785 */
1786enum ice_status
1787ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1788 u16 lut_size)
1789{
1790 return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
1791 true);
1792}
1793
1794/**
1795 * __ice_aq_get_set_rss_key
1796 * @hw: pointer to the hw struct
1797 * @vsi_id: VSI FW index
1798 * @key: pointer to key info struct
1799 * @set: set true to set the key, false to get the key
1800 *
1801 * get (0x0B04) or set (0x0B02) the RSS key per VSI
1802 */
1803static enum ice_status
1804__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
1805 struct ice_aqc_get_set_rss_keys *key,
1806 bool set)
1807{
1808 struct ice_aqc_get_set_rss_key *cmd_resp;
1809 u16 key_size = sizeof(*key);
1810 struct ice_aq_desc desc;
1811
1812 cmd_resp = &desc.params.get_set_rss_key;
1813
1814 if (set) {
1815 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
1816 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1817 } else {
1818 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
1819 }
1820
1821 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
1822 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
1823 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
1824 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
1825
1826 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
1827}
1828
1829/**
1830 * ice_aq_get_rss_key
1831 * @hw: pointer to the hw struct
1832 * @vsi_id: VSI FW index
1833 * @key: pointer to key info struct
1834 *
1835 * get the RSS key per VSI
1836 */
1837enum ice_status
1838ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
1839 struct ice_aqc_get_set_rss_keys *key)
1840{
1841 return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
1842}
1843
1844/**
1845 * ice_aq_set_rss_key
1846 * @hw: pointer to the hw struct
1847 * @vsi_id: VSI FW index
1848 * @keys: pointer to key info struct
1849 *
1850 * set the RSS key per VSI
1851 */
1852enum ice_status
1853ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
1854 struct ice_aqc_get_set_rss_keys *keys)
1855{
1856 return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
1857}
1858
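A matching sketch for the key wrappers (hypothetical caller; it assumes the standard_rss_key array member of struct ice_aqc_get_set_rss_keys as declared in ice_adminq_cmd.h).

/* Illustrative only: seed the standard RSS key with random bytes and
 * program it for the given VSI.
 */
static enum ice_status example_set_rss_key(struct ice_hw *hw, u16 vsi_id)
{
	struct ice_aqc_get_set_rss_keys keys;

	memset(&keys, 0, sizeof(keys));
	get_random_bytes(keys.standard_rss_key, sizeof(keys.standard_rss_key));

	return ice_aq_set_rss_key(hw, vsi_id, &keys);
}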
1859/**
1860 * ice_aq_add_lan_txq
1861 * @hw: pointer to the hardware structure
1862 * @num_qgrps: Number of added queue groups
1863 * @qg_list: list of queue groups to be added
1864 * @buf_size: size of buffer for indirect command
1865 * @cd: pointer to command details structure or NULL
1866 *
1867 * Add Tx LAN queue (0x0C30)
1868 *
1869 * NOTE:
1870 * Prior to calling add Tx LAN queue, initialize the following as part of
1871 * the Tx queue context: completion queue ID (if the queue uses a
1872 * completion queue), quanta profile, cache profile and packet shaper
1873 * profile.
1874 *
1875 * After add Tx LAN queue AQ command is completed:
1876 * Interrupts should be associated with specific queues.
1877 * Association of a Tx queue to a Doorbell queue is not part of the Add LAN
1878 * Tx queue flow.
1879 */
1880static enum ice_status
1881ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
1882 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
1883 struct ice_sq_cd *cd)
1884{
1885 u16 i, sum_header_size, sum_q_size = 0;
1886 struct ice_aqc_add_tx_qgrp *list;
1887 struct ice_aqc_add_txqs *cmd;
1888 struct ice_aq_desc desc;
1889
1890 cmd = &desc.params.add_txqs;
1891
1892 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
1893
1894 if (!qg_list)
1895 return ICE_ERR_PARAM;
1896
1897 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
1898 return ICE_ERR_PARAM;
1899
1900 sum_header_size = num_qgrps *
1901 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
1902
1903 list = qg_list;
1904 for (i = 0; i < num_qgrps; i++) {
1905 struct ice_aqc_add_txqs_perq *q = list->txqs;
1906
1907 sum_q_size += list->num_txqs * sizeof(*q);
1908 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
1909 }
1910
1911 if (buf_size != (sum_header_size + sum_q_size))
1912 return ICE_ERR_PARAM;
1913
1914 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1915
1916 cmd->num_qgrps = num_qgrps;
1917
1918 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
1919}
1920
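The buf_size check above is strict: the buffer must hold exactly one variable-length group header per group plus num_txqs per-queue entries. A hypothetical helper (not in the driver) that mirrors the same sizing walk:

/* Illustrative only: compute the buffer size ice_aq_add_lan_txq()
 * expects for a given group list. For the common 1 group / 1 queue
 * case this reduces to sizeof(*qg_list).
 */
static u16 example_add_txq_buf_size(struct ice_aqc_add_tx_qgrp *qg_list,
				    u8 num_qgrps)
{
	u16 sz = num_qgrps * (sizeof(*qg_list) - sizeof(*qg_list->txqs));
	struct ice_aqc_add_tx_qgrp *list = qg_list;
	u16 i;

	for (i = 0; i < num_qgrps; i++) {
		sz += list->num_txqs * sizeof(*list->txqs);
		list = (struct ice_aqc_add_tx_qgrp *)
			(list->txqs + list->num_txqs);
	}
	return sz;
}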
1921/**
1922 * ice_aq_dis_lan_txq
1923 * @hw: pointer to the hardware structure
1924 * @num_qgrps: number of groups in the list
1925 * @qg_list: the list of groups to disable
1926 * @buf_size: the total size of the qg_list buffer in bytes
1927 * @cd: pointer to command details structure or NULL
1928 *
1929 * Disable LAN Tx queue (0x0C31)
1930 */
1931static enum ice_status
1932ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
1933 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
1934 struct ice_sq_cd *cd)
1935{
1936 struct ice_aqc_dis_txqs *cmd;
1937 struct ice_aq_desc desc;
1938 u16 i, sz = 0;
1939
1940 cmd = &desc.params.dis_txqs;
1941 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
1942
1943 if (!qg_list)
1944 return ICE_ERR_PARAM;
1945
1946 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
1947 return ICE_ERR_PARAM;
1948 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1949 cmd->num_entries = num_qgrps;
1950
1951 for (i = 0; i < num_qgrps; ++i) {
1952 /* Calculate the size taken up by the queue IDs in this group */
1953 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
1954
1955 /* Add the size of the group header */
1956 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
1957
1958 /* If the number of queues is even, add 2 bytes of padding */
1959 if ((qg_list[i].num_qs % 2) == 0)
1960 sz += 2;
1961 }
1962
1963 if (buf_size != sz)
1964 return ICE_ERR_PARAM;
1965
1966 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
1967}
1968
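A worked trace of the sizing loop (illustrative; it assumes the 1-entry q_id array declared for struct ice_aqc_dis_txq_item):

/* Illustrative trace:
 *
 *   group header = sizeof(qg_list[i]) - sizeof(qg_list[i].q_id)
 *   num_qs == 1 -> sz = header + 1 * sizeof(__le16)       (no pad)
 *   num_qs == 2 -> sz = header + 2 * sizeof(__le16) + 2   (padded)
 *
 * so each padded group ends on a 4-byte boundary and the next group
 * header starts aligned within the buffer.
 */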
1969/* End of FW Admin Queue command wrappers */
1970
1971/**
1972 * ice_write_byte - write a byte to a packed context structure
1973 * @src_ctx: the context structure to read from
1974 * @dest_ctx: the context to be written to
1975 * @ce_info: a description of the struct to be filled
1976 */
1977static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
1978 const struct ice_ctx_ele *ce_info)
1979{
1980 u8 src_byte, dest_byte, mask;
1981 u8 *from, *dest;
1982 u16 shift_width;
1983
1984 /* copy from the next struct field */
1985 from = src_ctx + ce_info->offset;
1986
1987 /* prepare the bits and mask */
1988 shift_width = ce_info->lsb % 8;
1989 mask = (u8)(BIT(ce_info->width) - 1);
1990
1991 src_byte = *from;
1992 src_byte &= mask;
1993
1994 /* shift to correct alignment */
1995 mask <<= shift_width;
1996 src_byte <<= shift_width;
1997
1998 /* get the current bits from the target bit string */
1999 dest = dest_ctx + (ce_info->lsb / 8);
2000
2001 memcpy(&dest_byte, dest, sizeof(dest_byte));
2002
2003 dest_byte &= ~mask; /* get the bits not changing */
2004 dest_byte |= src_byte; /* add in the new bits */
2005
2006 /* put it all back */
2007 memcpy(dest, &dest_byte, sizeof(dest_byte));
2008}
2009
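A concrete trace of the byte path, with values invented for illustration:

/* Illustrative trace: pack a 3-bit field at bit 5 (width = 3, lsb = 5),
 * source value 0x05:
 *
 *   mask  = BIT(3) - 1          = 0x07
 *   shift = 5 % 8               = 5
 *   src   = (0x05 & 0x07) << 5  = 0xa0
 *   mask <<= 5                  -> 0xe0
 *   dest  = (dest & ~0xe0) | 0xa0
 *
 * Only bits 5-7 of the destination byte change.
 */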
2010/**
2011 * ice_write_word - write a word to a packed context structure
2012 * @src_ctx: the context structure to read from
2013 * @dest_ctx: the context to be written to
2014 * @ce_info: a description of the struct to be filled
2015 */
2016static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
2017 const struct ice_ctx_ele *ce_info)
2018{
2019 u16 src_word, mask;
2020 __le16 dest_word;
2021 u8 *from, *dest;
2022 u16 shift_width;
2023
2024 /* copy from the next struct field */
2025 from = src_ctx + ce_info->offset;
2026
2027 /* prepare the bits and mask */
2028 shift_width = ce_info->lsb % 8;
2029 mask = BIT(ce_info->width) - 1;
2030
2031 /* don't swizzle the bits until after the mask because the mask bits
2032 * will be in a different bit position on big endian machines
2033 */
2034 src_word = *(u16 *)from;
2035 src_word &= mask;
2036
2037 /* shift to correct alignment */
2038 mask <<= shift_width;
2039 src_word <<= shift_width;
2040
2041 /* get the current bits from the target bit string */
2042 dest = dest_ctx + (ce_info->lsb / 8);
2043
2044 memcpy(&dest_word, dest, sizeof(dest_word));
2045
2046 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
2047 dest_word |= cpu_to_le16(src_word); /* add in the new bits */
2048
2049 /* put it all back */
2050 memcpy(dest, &dest_word, sizeof(dest_word));
2051}
2052
2053/**
2054 * ice_write_dword - write a dword to a packed context structure
2055 * @src_ctx: the context structure to read from
2056 * @dest_ctx: the context to be written to
2057 * @ce_info: a description of the struct to be filled
2058 */
2059static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
2060 const struct ice_ctx_ele *ce_info)
2061{
2062 u32 src_dword, mask;
2063 __le32 dest_dword;
2064 u8 *from, *dest;
2065 u16 shift_width;
2066
2067 /* copy from the next struct field */
2068 from = src_ctx + ce_info->offset;
2069
2070 /* prepare the bits and mask */
2071 shift_width = ce_info->lsb % 8;
2072
2073 /* if the field width is exactly 32 on an x86 machine, then the shift
2074 * operation will not work because the SHL instruction's count is masked
2075 * to 5 bits, so the shift will do nothing
2076 */
2077 if (ce_info->width < 32)
2078 mask = BIT(ce_info->width) - 1;
2079 else
2080 mask = (u32)~0;
2081
2082 /* don't swizzle the bits until after the mask because the mask bits
2083 * will be in a different bit position on big endian machines
2084 */
2085 src_dword = *(u32 *)from;
2086 src_dword &= mask;
2087
2088 /* shift to correct alignment */
2089 mask <<= shift_width;
2090 src_dword <<= shift_width;
2091
2092 /* get the current bits from the target bit string */
2093 dest = dest_ctx + (ce_info->lsb / 8);
2094
2095 memcpy(&dest_dword, dest, sizeof(dest_dword));
2096
2097 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
2098 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
2099
2100 /* put it all back */
2101 memcpy(dest, &dest_dword, sizeof(dest_dword));
2102}
2103
2104/**
2105 * ice_write_qword - write a qword to a packed context structure
2106 * @src_ctx: the context structure to read from
2107 * @dest_ctx: the context to be written to
2108 * @ce_info: a description of the struct to be filled
2109 */
2110static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
2111 const struct ice_ctx_ele *ce_info)
2112{
2113 u64 src_qword, mask;
2114 __le64 dest_qword;
2115 u8 *from, *dest;
2116 u16 shift_width;
2117
2118 /* copy from the next struct field */
2119 from = src_ctx + ce_info->offset;
2120
2121 /* prepare the bits and mask */
2122 shift_width = ce_info->lsb % 8;
2123
2124 /* if the field width is exactly 64 on an x86 machine, then the shift
2125 * operation will not work because the SHL instruction's count is masked
2126 * to 6 bits, so the shift will do nothing
2127 */
2128 if (ce_info->width < 64)
2129 mask = BIT_ULL(ce_info->width) - 1;
2130 else
2131 mask = (u64)~0;
2132
2133 /* don't swizzle the bits until after the mask because the mask bits
2134 * will be in a different bit position on big endian machines
2135 */
2136 src_qword = *(u64 *)from;
2137 src_qword &= mask;
2138
2139 /* shift to correct alignment */
2140 mask <<= shift_width;
2141 src_qword <<= shift_width;
2142
2143 /* get the current bits from the target bit string */
2144 dest = dest_ctx + (ce_info->lsb / 8);
2145
2146 memcpy(&dest_qword, dest, sizeof(dest_qword));
2147
2148 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
2149 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
2150
2151 /* put it all back */
2152 memcpy(dest, &dest_qword, sizeof(dest_qword));
2153}
2154
2155/**
2156 * ice_set_ctx - set context bits in packed structure
2157 * @src_ctx: pointer to a generic non-packed context structure
2158 * @dest_ctx: pointer to memory for the packed structure
2159 * @ce_info: a description of the structure to be transformed
2160 */
2161enum ice_status
2162ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2163{
2164 int f;
2165
2166 for (f = 0; ce_info[f].width; f++) {
2167 /* We have to deal with each element of the FW response
2168 * using the correct size so that we are correct regardless
2169 * of the endianness of the machine.
2170 */
2171 switch (ce_info[f].size_of) {
2172 case sizeof(u8):
2173 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
2174 break;
2175 case sizeof(u16):
2176 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
2177 break;
2178 case sizeof(u32):
2179 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
2180 break;
2181 case sizeof(u64):
2182 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
2183 break;
2184 default:
2185 return ICE_ERR_INVAL_SIZE;
2186 }
2187 }
2188
2189 return 0;
2190}
2191
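A minimal sketch of driving ice_set_ctx(); the context layout below is invented for illustration (real layouts, such as the Tx LAN queue context, come from the hardware spec):

/* Illustrative only: pack a two-field context described by an invented
 * ice_ctx_ele table. Each entry names a source field (offset/size_of)
 * and its destination bit range (width/lsb); a zero-width entry ends
 * the table, matching the loop condition in ice_set_ctx().
 */
struct example_ctx {
	u8 mode;	/* lands in bits 0-2 of the packed image */
	u16 qlen;	/* lands in bits 3-13 of the packed image */
};

static const struct ice_ctx_ele example_ctx_info[] = {
	{ .offset = offsetof(struct example_ctx, mode),
	  .size_of = sizeof(u8), .width = 3, .lsb = 0 },
	{ .offset = offsetof(struct example_ctx, qlen),
	  .size_of = sizeof(u16), .width = 11, .lsb = 3 },
	{ 0 },
};

static enum ice_status example_pack_ctx(u8 *packed)
{
	struct example_ctx ctx = { .mode = 5, .qlen = 1000 };

	return ice_set_ctx((u8 *)&ctx, packed, example_ctx_info);
}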
2192/**
2193 * ice_ena_vsi_txq
2194 * @pi: port information structure
2195 * @vsi_id: VSI ID
2196 * @tc: TC number
2197 * @num_qgrps: Number of added queue groups
2198 * @buf: list of queue groups to be added
2199 * @buf_size: size of buffer for indirect command
2200 * @cd: pointer to command details structure or NULL
2201 *
2202 * This function adds one LAN Tx queue
2203 */
2204enum ice_status
2205ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
2206 struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
2207 struct ice_sq_cd *cd)
2208{
2209 struct ice_aqc_txsched_elem_data node = { 0 };
2210 struct ice_sched_node *parent;
2211 enum ice_status status;
2212 struct ice_hw *hw;
2213
2214 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2215 return ICE_ERR_CFG;
2216
2217 if (num_qgrps > 1 || buf->num_txqs > 1)
2218 return ICE_ERR_MAX_LIMIT;
2219
2220 hw = pi->hw;
2221
2222 mutex_lock(&pi->sched_lock);
2223
2224 /* find a parent node */
2225 parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
2226 ICE_SCHED_NODE_OWNER_LAN);
2227 if (!parent) {
2228 status = ICE_ERR_PARAM;
2229 goto ena_txq_exit;
2230 }
2231 buf->parent_teid = parent->info.node_teid;
2232 node.parent_teid = parent->info.node_teid;
2233 /* Mark the values in the "generic" section as valid. The default
2234 * value in the "generic" section is zero. This means that:
2235 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
2236 * - 0 priority among siblings, indicated by Bits 1-3.
2237 * - WFQ, indicated by Bit 4.
2238 * - 0 Adjustment value is used in PSM credit update flow, indicated by
2239 * Bits 5-6.
2240 * - Bit 7 is reserved.
2241 * Without setting the generic section as valid in valid_sections, the
2242 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
2243 */
2244 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
2245
2246 /* add the LAN Tx queue */
2247 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
2248 if (status)
2249 goto ena_txq_exit;
2250
2251 node.node_teid = buf->txqs[0].q_teid;
2252 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
2253
2254 /* add a leaf node into the scheduler tree queue layer */
2255 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
2256
2257ena_txq_exit:
2258 mutex_unlock(&pi->sched_lock);
2259 return status;
2260}
2261
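A caller-side sketch (illustrative; it assumes a txq_id member in struct ice_aqc_add_txqs_perq, and the packed Tx queue context, required per the NOTE on ice_aq_add_lan_txq(), is omitted here):

/* Illustrative only: enable one LAN Tx queue on TC 0 and hand back the
 * TEID of the scheduler node that was created for it.
 */
static enum ice_status
example_ena_one_txq(struct ice_port_info *pi, u16 vsi_id, u16 hw_q_id,
		    u32 *q_teid)
{
	struct ice_aqc_add_tx_qgrp qg = { 0 };
	enum ice_status status;

	qg.num_txqs = 1;
	qg.txqs[0].txq_id = cpu_to_le16(hw_q_id);

	status = ice_ena_vsi_txq(pi, vsi_id, 0 /* TC */, 1, &qg,
				 sizeof(qg), NULL);
	if (!status)
		*q_teid = le32_to_cpu(qg.txqs[0].q_teid);
	return status;
}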
2262/**
2263 * ice_dis_vsi_txq
2264 * @pi: port information structure
2265 * @num_queues: number of queues
2266 * @q_ids: pointer to the q_id array
2267 * @q_teids: pointer to queue node teids
2268 * @cd: pointer to command details structure or NULL
2269 *
2270 * This function removes queues and their corresponding nodes from the SW DB
2271 */
2272enum ice_status
2273ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
2274 u32 *q_teids, struct ice_sq_cd *cd)
2275{
2276 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
2277 struct ice_aqc_dis_txq_item qg_list;
2278 u16 i;
2279
2280 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2281 return ICE_ERR_CFG;
2282
2283 mutex_lock(&pi->sched_lock);
2284
2285 for (i = 0; i < num_queues; i++) {
2286 struct ice_sched_node *node;
2287
2288 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
2289 if (!node)
2290 continue;
2291 qg_list.parent_teid = node->info.parent_teid;
2292 qg_list.num_qs = 1;
2293 qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
2294 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
2295 sizeof(qg_list), cd);
2296
2297 if (status)
2298 break;
2299 ice_free_sched_node(pi, node);
2300 }
2301 mutex_unlock(&pi->sched_lock);
2302 return status;
2303}
2304
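And the matching teardown sketch, reusing the TEID returned by the enable sketch above (illustrative):

/* Illustrative only: disable the queue enabled earlier and free its
 * scheduler node.
 */
static enum ice_status
example_dis_one_txq(struct ice_port_info *pi, u16 hw_q_id, u32 q_teid)
{
	u16 q_ids[1] = { hw_q_id };
	u32 q_teids[1] = { q_teid };

	return ice_dis_vsi_txq(pi, 1, q_ids, q_teids, NULL);
}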
2305/**
2306 * ice_cfg_vsi_qs - configure the new/existing VSI queues
2307 * @pi: port information structure
2308 * @vsi_id: VSI ID
2309 * @tc_bitmap: TC bitmap
2310 * @maxqs: max queues array per TC
2311 * @owner: LAN or RDMA
2312 *
2313 * This function adds/updates the VSI queues per TC.
2314 */
2315static enum ice_status
2316ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
2317 u16 *maxqs, u8 owner)
2318{
2319 enum ice_status status = 0;
2320 u8 i;
2321
2322 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
2323 return ICE_ERR_CFG;
2324
2325 mutex_lock(&pi->sched_lock);
2326
2327 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
2328 /* configuration is possible only if TC node is present */
2329 if (!ice_sched_get_tc_node(pi, i))
2330 continue;
2331
2332 status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
2333 ice_is_tc_ena(tc_bitmap, i));
2334 if (status)
2335 break;
2336 }
2337
2338 mutex_unlock(&pi->sched_lock);
2339 return status;
2340}
2341
2342/**
2343 * ice_cfg_vsi_lan - configure VSI LAN queues
2344 * @pi: port information structure
2345 * @vsi_id: VSI ID
2346 * @tc_bitmap: TC bitmap
2347 * @max_lanqs: max LAN queues array per TC
2348 *
2349 * This function adds/updates the VSI LAN queues per TC.
2350 */
2351enum ice_status
2352ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
2353 u16 *max_lanqs)
2354{
2355 return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
2356 ICE_SCHED_NODE_OWNER_LAN);
2357}
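Finally, a sketch of sizing the per-TC LAN queues (illustrative values):

/* Illustrative only: allow up to 16 LAN queues on TC 0 and leave the
 * remaining TCs empty.
 */
static enum ice_status
example_cfg_lan_qs(struct ice_port_info *pi, u16 vsi_id)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 16 };

	return ice_cfg_vsi_lan(pi, vsi_id, BIT(0) /* TC 0 */, max_lanqs);
}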