// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
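/* The driver talks to the IMP firmware through a pair of DMA-coherent
 * descriptor rings: the CSQ (command send queue) carries driver requests and
 * the CRQ (command receive queue) carries firmware messages. Head and tail
 * pointers for both rings live in device registers.
 */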
static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}
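/* Worked example: with desc_num = 1024 (illustrative value), next_to_use = 10
 * and next_to_clean = 1000, used = (10 - 1000 + 1024) % 1024 = 34, leaving
 * 1024 - 34 - 1 = 989 free slots. One slot is always kept unused so that a
 * full ring can be told apart from an empty one.
 */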
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}
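/* Sketch of the two cases above: when the in-flight region does not wrap
 * (ntu > ntc), a valid hardware head must lie within [ntc, ntu]; otherwise
 * the region wraps past the end of the ring and the valid positions are
 * [ntc, desc_num) together with [0, ntu].
 */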
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}
static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}
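/* The DEPTH registers take desc_num pre-shifted by HCLGE_NIC_CMQ_DESC_NUM_S;
 * e.g., assuming a shift of 3 (illustrative), a 1024-descriptor ring is
 * programmed as 1024 >> 3 = 128. The CSQ write additionally preserves the
 * HCLGE_NIC_SW_RST_RDY bit that was read back above.
 */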
static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;

	return clean;
}
static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}
static bool hclge_is_special_opcode(u16 opcode)
{
	/* these commands have several descriptors,
	 * and use the first one to save opcode and return value
	 */
	u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
			     HCLGE_OPC_STATS_32_BIT,
			     HCLGE_OPC_STATS_MAC,
			     HCLGE_OPC_STATS_MAC_ALL,
			     HCLGE_OPC_QUERY_32_BIT_REG,
			     HCLGE_OPC_QUERY_64_BIT_REG,
			     HCLGE_QUERY_CLEAR_MPF_RAS_INT,
			     HCLGE_QUERY_CLEAR_PF_RAS_INT,
			     HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
			     HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}
static int hclge_cmd_convert_err_code(u16 desc_ret)
{
	switch (desc_ret) {
	case HCLGE_CMD_EXEC_SUCCESS:
		return 0;
	case HCLGE_CMD_NO_AUTH:
		return -EPERM;
	case HCLGE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HCLGE_CMD_QUEUE_FULL:
		return -EXFULL;
	case HCLGE_CMD_NEXT_ERR:
		return -ENOSR;
	case HCLGE_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HCLGE_CMD_PARA_ERR:
		return -EINVAL;
	case HCLGE_CMD_RESULT_ERR:
		return -ERANGE;
	case HCLGE_CMD_TIMEOUT:
		return -ETIME;
	case HCLGE_CMD_HILINK_ERR:
		return -ENOLINK;
	case HCLGE_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HCLGE_CMD_INVALID:
		return -EBADR;
	default:
		return -EIO;
	}
}
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
				  int num, int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_cmd_convert_err_code(desc_ret);
}
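/* Example: for a three-descriptor command the status is normally reported in
 * desc[2].retval; for the "special" multi-descriptor opcodes listed above,
 * the opcode and return value stay in desc[0].
 */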
/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue: it posts the
 * descriptors to the CSQ, waits for completion when the command is
 * synchronous, checks the firmware return value and cleans the queue.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int retval;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		/* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
		 * need update the SW HEAD pointer csq->next_to_clean
		 */
		csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used by the hardware to write back
	 */
	ntc = hw->cmq.csq.next_to_use;
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				complete = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!complete)
		retval = -EBADE;
	else
		retval = hclge_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		retval = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}
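/* Usage sketch (illustrative, not part of the original file): a typical
 * synchronous single-descriptor query looks like
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
 *	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 *
 * On success the firmware reply has been copied back into desc.data.
 */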
static void hclge_set_default_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}
static void hclge_parse_capability(struct hclge_dev *hdev,
				   struct hclge_query_version_cmd *cmd)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	u32 caps;

	caps = __le32_to_cpu(cmd->caps[0]);

	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
		set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
		set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B))
		set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
}
static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_query_version_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	hdev->fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
			      HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= hdev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclge_set_default_capability(hdev);

	hclge_parse_capability(hdev, resp);

	return ret;
}
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup the queue entries for the command queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;

err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}
static int hclge_firmware_compat_config(struct hclge_dev *hdev)
{
	struct hclge_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);

	req = (struct hclge_firmware_compat_cmd *)desc.data;

	hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
	hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
	req->compat = cpu_to_le32(compat);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_cmd_init(struct hclge_dev *hdev)
{
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is a new reset pending, because a higher-level
	 * reset may happen while a lower-level reset is being processed.
	 */
	if (hclge_is_reset_pending(hdev)) {
		dev_err(&hdev->pdev->dev,
			"failed to init cmd since reset %#lx pending\n",
			hdev->reset_pending);
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get version and device capabilities */
	ret = hclge_cmd_query_version_and_capability(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));
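	/* Illustration (assuming the BYTEn mask/shift pairs each select byte n
	 * of the 32-bit word): fw_version 0x01080300 prints as "1.8.3.0".
	 */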
	/* ask the firmware to enable some features, driver can work without
	 * it.
	 */
	ret = hclge_firmware_compat_config(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);

	return 0;

err_cmd_init:
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}
static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
{
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
}
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hclge_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	hclge_free_cmd_desc(&hdev->hw.cmq.crq);
}