// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))

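/* Example of what one invocation programs: with idx = 0,
 * ICE_PROG_FLEX_ENTRY(hw, ICE_RXDID_FLEX_NIC, ICE_RX_MDID_HASH_LOW, 0)
 * expands to a single register write,
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(ICE_RXDID_FLEX_NIC),
 *	     ((ICE_RX_OPC_MDID << GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M) |
 *	     ((ICE_RX_MDID_HASH_LOW << GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M));
 *
 * i.e. it binds one flex word of an Rx descriptor profile to one metadata ID.
 */
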
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The returned addresses are also stored in the
 * HW struct (port.mac). ice_aq_discover_caps is expected to be called before
 * this function.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}

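/* A sketch of a typical call, mirroring the driver's own usage in
 * ice_init_hw() below ("pi" is an assumed, already-initialized port_info
 * pointer):
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*pcaps),
 *			     GFP_KERNEL);
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
 *				     pcaps, NULL);
 *	devm_kfree(ice_hw_to_dev(pi->hw), pcaps);
 */
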
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}

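/* Sketch: polling the current link state without enabling link status
 * events ("pi" is an assumed port_info pointer; ICE_AQ_LINK_UP is assumed
 * to be the link-up bit of the returned link_info field):
 *
 *	struct ice_link_status link;
 *	enum ice_status status;
 *
 *	status = ice_aq_get_link_info(pi, false, &link, NULL);
 *	if (!status && (link.link_info & ICE_AQ_LINK_UP))
 *		speed = link.link_speed;	an ICE_AQ_LINK_SPEED_* value
 */
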
/**
 * ice_init_flex_flags
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize Rx flex flags
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
	 * flexiflags1[3:0] - Not used for flag programming
	 * flexiflags2[7:0] - Tunnel and VLAN types
	 * 2 invalid fields in last index
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_FIN, idx++);
		/* flex flag 1 is not used for flexi-flag programming, skipping
		 * these four FLG64 bits.
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
				   ICE_RXFLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
				   ICE_RXFLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_flex_flds
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	ice_init_def_sw_recp(hw);

	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the hw struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from filling up and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}

/**
 * ice_output_fw_log
 * @hw: pointer to the hw struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
}

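/* Sketch of the caller-side setup ice_cfg_fw_log() expects, done before
 * ice_init_hw(); the module index and the ICE_FW_LOG_EVNT_* event masks are
 * assumptions about the companion headers:
 *
 *	hw->fw_log.cq_en = true;	log via Rx CQ events
 *	hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_GENERAL].cfg =
 *		ICE_FW_LOG_EVNT_INIT | ICE_FW_LOG_EVNT_ERR;
 *
 * ice_init_hw() then calls ice_cfg_fw_log(hw, true), and on success the
 * "cur" element of each reconfigured module is updated to match "cfg".
 */
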
/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the hw struct
 *
 * Determines the itr/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to determine itr/intrl granularity\n");
		return ICE_ERR_CFG;
	}

	return 0;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	status = ice_get_itr_intrl_gran(hw);
	if (status)
		return status;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}

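/* Sketch of where ice_init_hw()/ice_deinit_hw() sit in a probe/remove pair
 * (the surrounding driver code, ICE_BAR0 and the "pf" structure are
 * assumptions about the rest of the driver):
 *
 *	hw->back = pf;
 *	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return -EIO;
 *	...
 *	ice_deinit_hw(hw);	on remove/teardown
 */
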
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_shutdown_all_ctrlq(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

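/* Reading the table: each ICE_CTX_STORE() entry tells ice_set_ctx() where a
 * sparse-struct field lands in the dense context image. For example,
 * "qlen, 13, 89" means the 13-bit queue length is packed starting at bit 89
 * of the image, i.e. bit 1 of byte 11 (89 = 11 * 8 + 1), immediately after
 * the 57-bit base field that starts at bit 32.
 */
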
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

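/* Sketch of a caller, in the spirit of the driver's Rx queue setup path
 * ("ring" and its members are assumed driver-side state; base and dbuf are
 * in 128-byte units):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring->dma >> 7;
 *	rlan_ctx.qlen = ring->count;
 *	rlan_ctx.dbuf = ring->rx_buf_len >> 7;
 *	...
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, ring->reg_idx);
 */
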
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

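/* The two functions above pair up as a simple ownership pattern. Sketch of a
 * caller taking a resource for reading (ICE_NVM_RES_ID is from
 * enum ice_aq_res_ids; the 3000 ms timeout is an illustrative assumption):
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (status)
 *		return status;
 *	... use the resource ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */
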
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @cap_count: set to the number of capability records needed if the AQ
 *             responds with ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count =
			DIV_ROUND_UP(le16_to_cpu(desc.datalen),
				     sizeof(struct ice_aqc_list_caps_elem));
	return status;
}

/**
 * ice_discover_caps - get info about the HW
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */
static enum ice_status ice_discover_caps(struct ice_hw *hw,
					 enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
	 * The driver then allocates the buffer based on the count and retries
	 * the operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
	} while (--retries);

	return status;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = htons(*((u16 *)mac_addr));
	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

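/* Worked example of the sah/sal packing above on a little-endian host: for
 * mac_addr 00:11:22:33:44:55, *((u16 *)mac_addr) reads 0x1100 and
 * *((u32 *)(mac_addr + 2)) reads 0x55443322; htons()/htonl() then byte-swap
 * those host values into the big-endian sah/sal fields of the descriptor.
 */
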
/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

48cb27f2
CC
1638/**
1639 * ice_get_link_speed_based_on_phy_type - returns link speed
1640 * @phy_type_low: lower part of phy_type
1641 *
1642 * This helper function will convert a phy_type_low to its corresponding link
1643 * speed.
1644 * Note: In the structure of phy_type_low, there should be one bit set, as
1645 * this function will convert one phy type to its speed.
1646 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
1647 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
1648 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	return speed_phy_type_low;
}

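/* Illustrative only (not part of the driver): with exactly one
 * ICE_PHY_TYPE_LOW_* bit set, the helper maps it to a speed; any other
 * input yields ICE_AQ_LINK_SPEED_UNKNOWN.
 *
 *	ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_10GBASE_SR)
 *		== ICE_AQ_LINK_SPEED_10GB
 *	ice_get_link_speed_based_on_phy_type(0) == ICE_AQ_LINK_SPEED_UNKNOWN
 */
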
/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: The format of link_speeds_bitmap is the same as the link_speed
 * field of ice_aqc_get_link_status. The caller may pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each value of phy_type_low represents a particular link speed. This
 * helper function turns on bits in phy_type_low based on the value of the
 * link_speeds_bitmap input parameter.
 */
void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
{
	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
	u64 pt_low;
	int index;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}
}

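/* Illustrative only: derive the PHY-type mask for a set of requested
 * speeds before programming the PHY configuration; phy_type_low ends up
 * with one bit set per matching ICE_PHY_TYPE_LOW_* type.
 *
 *	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;
 *	u64 phy_type_low = 0;
 *
 *	ice_update_phy_type(&phy_type_low, speeds);
 */
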
/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}

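/* Illustrative only: callers normally read the current capabilities with
 * ice_aq_get_phy_caps(), copy them into the config, adjust the fields of
 * interest, and apply the result (see ice_set_fc() below for the full
 * pattern).
 *
 *	cfg.phy_type_low = pcaps->phy_type_low;
 *	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *	ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
 */
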
/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	phy_info = &pi->phy;
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		goto out;

	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
					     pcaps, NULL);
		if (status)
			goto out;

		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
		       sizeof(phy_info->link_info.module_type));
	}
out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
	/* set the new capabilities */
	cfg.caps |= pause_mask;
	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		/* Copy over all the old settings */
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

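/* Illustrative only: a caller records the desired mode in the port info
 * and lets ice_set_fc() reconcile it with the current PHY capabilities.
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 */
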
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = link up/link down)
 *
 * On return, *link_up is true if the link is up and false if it is down;
 * the value is invalid if the returned status is nonzero. As a result of
 * this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

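/* Illustrative only: poll the link and act only on a valid result (the
 * netif_carrier_on() reaction is hypothetical).
 *
 *	bool link_up;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		netif_carrier_on(netdev);
 */
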
/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

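/* Illustrative only: restart auto-negotiation with the link left enabled,
 * e.g. after changing the PHY configuration without the auto-update flag.
 *
 *	ice_aq_set_link_restart_an(pi, true, NULL);
 */
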
/**
 * ice_aq_set_event_mask
 * @hw: pointer to the hw struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

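/* Illustrative only: the mask is a bitwise OR of link event bits. The
 * ICE_AQ_LINK_EVENT_* names are assumed from the admin queue definitions;
 * they are not defined in this file.
 *
 *	ice_aq_set_event_mask(hw, pi->lport,
 *			      ICE_AQ_LINK_EVENT_UPDOWN |
 *			      ICE_AQ_LINK_EVENT_MEDIA_NA, NULL);
 */
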
/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, true);
}

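/* Illustrative only: spread flows round-robin over four queues using a
 * VSI-type LUT (a 64-entry table is assumed here; the required size is a
 * property of the LUT type and is not defined in this file).
 *
 *	u8 lut[64];
 *	int i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % 4;
 *	ice_aq_set_rss_lut(hw, vsi_handle,
 *			   ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
 *			   lut, sizeof(lut));
 */
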
/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key,
			 bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

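/* Illustrative only: fill the key with random bytes before programming it
 * (the standard_rss_key field name is assumed from the admin queue
 * definitions, not defined in this file).
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	netdev_rss_key_fill(keys.standard_rss_key,
 *			    sizeof(keys.standard_rss_key));
 *	ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */
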
/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue, initialize the following as part of
 * the Tx queue context: completion queue ID (if the queue uses a completion
 * queue), quanta profile, cache profile, and packet shaper profile.
 *
 * After the add Tx LAN queue AQ command completes, interrupts should be
 * associated with specific queues. Association of a Tx queue to a doorbell
 * queue is not part of the add LAN Tx queue flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_entries = num_qgrps;

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

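/* Illustrative sizing example for the disable command above: one group
 * carrying three queue IDs needs
 *	(sizeof(struct ice_aqc_dis_txq_item) - sizeof(q_id)) + 3 * sizeof(u16)
 * bytes, with no padding since the queue count is odd; an even count would
 * add 2 more bytes (presumably to keep each group entry aligned).
 */
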
/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 5 bits, so the shift would do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 6 bits, so the shift would do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}

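/* Illustrative only: with a (hypothetical) element descriptor
 *	{ .offset = 0, .size_of = sizeof(u8), .lsb = 3, .width = 5 }
 * ice_set_ctx() routes the field to ice_write_byte(), which masks the
 * source value to 5 bits, shifts it left by 3 (lsb % 8), and merges it
 * into destination byte 0 (lsb / 8) without disturbing the other bits.
 */
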
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bits 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated
	 *   by Bits 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin Queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status)
		goto ena_txq_exit;

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @num_queues: number of queues
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
		u32 *q_teids, struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

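/* Illustrative only: enable 16 LAN queues on TC 0 of a VSI (BIT(0) marks
 * TC 0 as enabled in the bitmap).
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 16 };
 *
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
 */
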
/**
 * ice_replay_pre_init - replay pre-initialization
 * @hw: pointer to the hw struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from the replay filter list head if there are
	 * any.
	 */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the hw struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. The main VSI must be replayed
 * first by calling this function with its handle.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}

	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the hw struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @hireg: high 32 bit HW register to read from
 * @loreg: low 32 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
		       bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data;

	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. So save the first values read and use them
	 * as offsets to be subtracted from the raw values in order to report
	 * stats that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
	*cur_stat &= 0xFFFFFFFFFFULL;
}

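/* Illustrative rollover arithmetic for the 40-bit counter above: if the
 * previous reading was 0xFFFFFFFFF0 and the counter wrapped around to
 * 0x10, the reported delta is (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20.
 */
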
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		       u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. So save the first values read and use them
	 * as offsets to be subtracted from the raw values in order to report
	 * stats that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}