// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"

#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
                                        DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)

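/* hclgevf_ring_space - number of free descriptors left in a CSQ/CRQ ring.
 * One slot is kept unused so that next_to_use == next_to_clean only when
 * the ring is empty.
 */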
static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
        int ntc = ring->next_to_clean;
        int ntu = ring->next_to_use;
        int used;

        used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

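/* hclgevf_cmd_csq_clean - reclaim CSQ descriptors already consumed by
 * hardware: walk from next_to_clean up to the hardware head pointer,
 * zeroing each descriptor, and return the number of entries cleaned.
 */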
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
        struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
        u16 ntc = csq->next_to_clean;
        struct hclgevf_desc *desc;
        int clean = 0;
        u32 head;

        desc = &csq->desc[ntc];
        head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
        while (head != ntc) {
                memset(desc, 0, sizeof(*desc));
                ntc++;
                if (ntc == csq->desc_num)
                        ntc = 0;
                desc = &csq->desc[ntc];
                clean++;
        }
        csq->next_to_clean = ntc;

        return clean;
}

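/* hclgevf_cmd_csq_done - true when the hardware head pointer has caught up
 * with the software next_to_use, i.e. all submitted descriptors are done.
 */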
static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
        u32 head;

        head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

        return head == hw->cmq.csq.next_to_use;
}

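/* hclgevf_is_special_opcode - opcodes whose result is taken from the first
 * descriptor of a multi-descriptor command rather than from each descriptor.
 */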
static bool hclgevf_is_special_opcode(u16 opcode)
{
        u16 spec_opcode[] = {0x30, 0x31, 0x32};
        int i;

        for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
                if (spec_opcode[i] == opcode)
                        return true;
        }

        return false;
}

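/* hclgevf_cmd_config_regs - program the descriptor ring base address, depth
 * and head/tail pointers of the CSQ or CRQ into the hardware registers.
 */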
static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
{
        struct hclgevf_dev *hdev = ring->dev;
        struct hclgevf_hw *hw = &hdev->hw;
        u32 reg_val;

        if (ring->flag == HCLGEVF_TYPE_CSQ) {
                reg_val = (u32)ring->desc_dma_addr;
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
                reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

                reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
                reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
        } else {
                reg_val = (u32)ring->desc_dma_addr;
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
                reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

                reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
                reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
                hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
        }
}

static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
{
        hclgevf_cmd_config_regs(&hw->cmq.csq);
        hclgevf_cmd_config_regs(&hw->cmq.crq);
}

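/* hclgevf_alloc_cmd_desc - allocate the DMA-coherent descriptor array used
 * by a command queue ring.
 */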
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclgevf_desc);

        ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
                                         size, &ring->desc_dma_addr,
                                         GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        return 0;
}

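/* hclgevf_free_cmd_desc - free the descriptor array of a command queue ring */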
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hclgevf_desc);

        if (ring->desc) {
                dma_free_coherent(cmq_ring_to_dev(ring), size,
                                  ring->desc, ring->desc_dma_addr);
                ring->desc = NULL;
        }
}

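/* hclgevf_alloc_cmd_queue - pick the CSQ or CRQ ring of this device and
 * allocate its descriptor memory.
 */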
static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
        struct hclgevf_hw *hw = &hdev->hw;
        struct hclgevf_cmq_ring *ring =
                (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
        int ret;

        ring->dev = hdev;
        ring->flag = ring_type;

        /* allocate CSQ/CRQ descriptor */
        ret = hclgevf_alloc_cmd_desc(ring);
        if (ret)
                dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
                        (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");

        return ret;
}

void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
                                  enum hclgevf_opcode_type opcode, bool is_read)
{
        memset(desc, 0, sizeof(struct hclgevf_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
                                 HCLGEVF_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for command queue, it
 * sends the queue, cleans the queue, etc
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
        struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
        struct hclgevf_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        int status = 0;
        u16 retval;
        u16 opcode;
        int ntc;

        spin_lock_bh(&hw->cmq.csq.lock);

        if (num > hclgevf_ring_space(&hw->cmq.csq) ||
            test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }

        /* Record the location of desc in the ring for this time
         * which will be use for hardware to write back
         */
        ntc = hw->cmq.csq.next_to_use;
        opcode = le16_to_cpu(desc[0].opcode);
        while (handle < num) {
                desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
                *desc_to_use = desc[handle];
                (hw->cmq.csq.next_to_use)++;
                if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
                        hw->cmq.csq.next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
                          hw->cmq.csq.next_to_use);

        /* If the command is sync, wait for the firmware to write back,
         * if multi descriptors to be sent, use the first one to check
         */
        if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
                do {
                        if (hclgevf_cmd_csq_done(hw))
                                break;
                        udelay(1);
                        timeout++;
                } while (timeout < hw->cmq.tx_timeout);
        }

        if (hclgevf_cmd_csq_done(hw)) {
                complete = true;
                handle = 0;

                while (handle < num) {
                        /* Get the result of hardware write back */
                        desc_to_use = &hw->cmq.csq.desc[ntc];
                        desc[handle] = *desc_to_use;

                        if (likely(!hclgevf_is_special_opcode(opcode)))
                                retval = le16_to_cpu(desc[handle].retval);
                        else
                                retval = le16_to_cpu(desc[0].retval);

                        if ((enum hclgevf_cmd_return_status)retval ==
                            HCLGEVF_CMD_EXEC_SUCCESS)
                                status = 0;
                        else
                                status = -EIO;
                        hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
                        ntc++;
                        handle++;
                        if (ntc == hw->cmq.csq.desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                status = -EAGAIN;

        /* Clean the command send queue */
        handle = hclgevf_cmd_csq_clean(hw);
        if (handle != num)
                dev_warn(&hdev->pdev->dev,
                         "cleaned %d, need to clean %d\n", handle, num);

        spin_unlock_bh(&hw->cmq.csq.lock);

        return status;
}

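/* hclgevf_cmd_query_firmware_version - read the firmware version over the
 * command queue and return it through @version.
 */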
static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
                                               u32 *version)
{
        struct hclgevf_query_version_cmd *resp;
        struct hclgevf_desc desc;
        int status;

        resp = (struct hclgevf_query_version_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
        status = hclgevf_cmd_send(hw, &desc, 1);
        if (!status)
                *version = le32_to_cpu(resp->firmware);

        return status;
}

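/* hclgevf_cmd_queue_init - set up the CSQ/CRQ locks, timeout and descriptor
 * counts, then allocate descriptor memory for both rings.
 */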
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
        int ret;

        /* Setup the lock for command queue */
        spin_lock_init(&hdev->hw.cmq.csq.lock);
        spin_lock_init(&hdev->hw.cmq.crq.lock);

        hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
        hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
        hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;

        ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "CSQ ring setup error %d\n", ret);
                return ret;
        }

        ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "CRQ ring setup error %d\n", ret);
                goto err_csq;
        }

        return 0;
err_csq:
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        return ret;
}

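/* hclgevf_cmd_init - reset the ring pointers, program the command queue
 * registers and query the firmware version; bails out if a reset is pending.
 */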
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
        u32 version;
        int ret;

        spin_lock_bh(&hdev->hw.cmq.csq.lock);
        spin_lock_bh(&hdev->hw.cmq.crq.lock);

        /* initialize the pointers of async rx queue of mailbox */
        hdev->arq.hdev = hdev;
        hdev->hw.cmq.csq.next_to_clean = 0;
        hdev->hw.cmq.csq.next_to_use = 0;
        hdev->hw.cmq.crq.next_to_clean = 0;
        hdev->hw.cmq.crq.next_to_use = 0;

        hclgevf_cmd_init_regs(&hdev->hw);

        spin_unlock_bh(&hdev->hw.cmq.crq.lock);
        spin_unlock_bh(&hdev->hw.cmq.csq.lock);

        clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

        /* Check if there is new reset pending, because the higher level
         * reset may happen when lower level reset is being processed.
         */
        if (hclgevf_is_reset_pending(hdev)) {
                set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
                return -EBUSY;
        }

        /* get firmware version */
        ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed(%d) to query firmware version\n", ret);
                return ret;
        }
        hdev->fw_version = version;

        dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

        return 0;
}

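/* hclgevf_cmd_uninit - release the descriptor memory of both rings */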
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
        hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
        hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}