/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
10 #include <linux/dma-mapping.h>
11 #include <linux/slab.h>
12 #include <linux/pci.h>
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/dma-direction.h>
16 #include "hclge_cmd.h"
18 #include "hclge_main.h"
/* True when @ring is the command send queue (CSQ), false for the CRQ */
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)

/* Resolve the struct device owning @ring, as needed by the DMA API */
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
24 static int hclge_ring_space(struct hclge_cmq_ring
*ring
)
26 int ntu
= ring
->next_to_use
;
27 int ntc
= ring
->next_to_clean
;
28 int used
= (ntu
- ntc
+ ring
->desc_num
) % ring
->desc_num
;
30 return ring
->desc_num
- used
- 1;
33 static int is_valid_csq_clean_head(struct hclge_cmq_ring
*ring
, int h
)
35 int u
= ring
->next_to_use
;
36 int c
= ring
->next_to_clean
;
38 if (unlikely(h
>= ring
->desc_num
))
41 return u
> c
? (h
> c
&& h
<= u
) : (h
> c
|| h
<= u
);
44 static int hclge_alloc_cmd_desc(struct hclge_cmq_ring
*ring
)
46 int size
= ring
->desc_num
* sizeof(struct hclge_desc
);
48 ring
->desc
= dma_zalloc_coherent(cmq_ring_to_dev(ring
),
49 size
, &ring
->desc_dma_addr
,
57 static void hclge_free_cmd_desc(struct hclge_cmq_ring
*ring
)
59 int size
= ring
->desc_num
* sizeof(struct hclge_desc
);
62 dma_free_coherent(cmq_ring_to_dev(ring
), size
,
63 ring
->desc
, ring
->desc_dma_addr
);
68 static int hclge_alloc_cmd_queue(struct hclge_dev
*hdev
, int ring_type
)
70 struct hclge_hw
*hw
= &hdev
->hw
;
71 struct hclge_cmq_ring
*ring
=
72 (ring_type
== HCLGE_TYPE_CSQ
) ? &hw
->cmq
.csq
: &hw
->cmq
.crq
;
75 ring
->ring_type
= ring_type
;
78 ret
= hclge_alloc_cmd_desc(ring
);
80 dev_err(&hdev
->pdev
->dev
, "descriptor %s alloc error %d\n",
81 (ring_type
== HCLGE_TYPE_CSQ
) ? "CSQ" : "CRQ", ret
);
88 void hclge_cmd_reuse_desc(struct hclge_desc
*desc
, bool is_read
)
90 desc
->flag
= cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR
| HCLGE_CMD_FLAG_IN
);
92 desc
->flag
|= cpu_to_le16(HCLGE_CMD_FLAG_WR
);
94 desc
->flag
&= cpu_to_le16(~HCLGE_CMD_FLAG_WR
);
97 void hclge_cmd_setup_basic_desc(struct hclge_desc
*desc
,
98 enum hclge_opcode_type opcode
, bool is_read
)
100 memset((void *)desc
, 0, sizeof(struct hclge_desc
));
101 desc
->opcode
= cpu_to_le16(opcode
);
102 desc
->flag
= cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR
| HCLGE_CMD_FLAG_IN
);
105 desc
->flag
|= cpu_to_le16(HCLGE_CMD_FLAG_WR
);
108 static void hclge_cmd_config_regs(struct hclge_cmq_ring
*ring
)
110 dma_addr_t dma
= ring
->desc_dma_addr
;
111 struct hclge_dev
*hdev
= ring
->dev
;
112 struct hclge_hw
*hw
= &hdev
->hw
;
114 if (ring
->ring_type
== HCLGE_TYPE_CSQ
) {
115 hclge_write_dev(hw
, HCLGE_NIC_CSQ_BASEADDR_L_REG
,
117 hclge_write_dev(hw
, HCLGE_NIC_CSQ_BASEADDR_H_REG
,
119 hclge_write_dev(hw
, HCLGE_NIC_CSQ_DEPTH_REG
,
120 (ring
->desc_num
>> HCLGE_NIC_CMQ_DESC_NUM_S
) |
121 HCLGE_NIC_CMQ_ENABLE
);
122 hclge_write_dev(hw
, HCLGE_NIC_CSQ_HEAD_REG
, 0);
123 hclge_write_dev(hw
, HCLGE_NIC_CSQ_TAIL_REG
, 0);
125 hclge_write_dev(hw
, HCLGE_NIC_CRQ_BASEADDR_L_REG
,
127 hclge_write_dev(hw
, HCLGE_NIC_CRQ_BASEADDR_H_REG
,
129 hclge_write_dev(hw
, HCLGE_NIC_CRQ_DEPTH_REG
,
130 (ring
->desc_num
>> HCLGE_NIC_CMQ_DESC_NUM_S
) |
131 HCLGE_NIC_CMQ_ENABLE
);
132 hclge_write_dev(hw
, HCLGE_NIC_CRQ_HEAD_REG
, 0);
133 hclge_write_dev(hw
, HCLGE_NIC_CRQ_TAIL_REG
, 0);
137 static void hclge_cmd_init_regs(struct hclge_hw
*hw
)
139 hclge_cmd_config_regs(&hw
->cmq
.csq
);
140 hclge_cmd_config_regs(&hw
->cmq
.crq
);
143 static int hclge_cmd_csq_clean(struct hclge_hw
*hw
)
145 struct hclge_dev
*hdev
= container_of(hw
, struct hclge_dev
, hw
);
146 struct hclge_cmq_ring
*csq
= &hw
->cmq
.csq
;
150 head
= hclge_read_dev(hw
, HCLGE_NIC_CSQ_HEAD_REG
);
151 rmb(); /* Make sure head is ready before touch any data */
153 if (!is_valid_csq_clean_head(csq
, head
)) {
154 dev_warn(&hdev
->pdev
->dev
, "wrong cmd head (%d, %d-%d)\n", head
,
155 csq
->next_to_use
, csq
->next_to_clean
);
159 clean
= (head
- csq
->next_to_clean
+ csq
->desc_num
) % csq
->desc_num
;
160 csq
->next_to_clean
= head
;
164 static int hclge_cmd_csq_done(struct hclge_hw
*hw
)
166 u32 head
= hclge_read_dev(hw
, HCLGE_NIC_CSQ_HEAD_REG
);
167 return head
== hw
->cmq
.csq
.next_to_use
;
170 static bool hclge_is_special_opcode(u16 opcode
)
172 /* these commands have several descriptors,
173 * and use the first one to save opcode and return value
175 u16 spec_opcode
[3] = {HCLGE_OPC_STATS_64_BIT
,
176 HCLGE_OPC_STATS_32_BIT
, HCLGE_OPC_STATS_MAC
};
179 for (i
= 0; i
< ARRAY_SIZE(spec_opcode
); i
++) {
180 if (spec_opcode
[i
] == opcode
)
188 * hclge_cmd_send - send command to command queue
189 * @hw: pointer to the hw struct
190 * @desc: prefilled descriptor for describing the command
191 * @num : the number of descriptors to be sent
193 * This is the main send command for command queue, it
194 * sends the queue, cleans the queue, etc
196 int hclge_cmd_send(struct hclge_hw
*hw
, struct hclge_desc
*desc
, int num
)
198 struct hclge_dev
*hdev
= container_of(hw
, struct hclge_dev
, hw
);
199 struct hclge_desc
*desc_to_use
;
200 bool complete
= false;
204 u16 opcode
, desc_ret
;
207 spin_lock_bh(&hw
->cmq
.csq
.lock
);
209 if (num
> hclge_ring_space(&hw
->cmq
.csq
) ||
210 test_bit(HCLGE_STATE_CMD_DISABLE
, &hdev
->state
)) {
211 spin_unlock_bh(&hw
->cmq
.csq
.lock
);
216 * Record the location of desc in the ring for this time
217 * which will be use for hardware to write back
219 ntc
= hw
->cmq
.csq
.next_to_use
;
220 opcode
= le16_to_cpu(desc
[0].opcode
);
221 while (handle
< num
) {
222 desc_to_use
= &hw
->cmq
.csq
.desc
[hw
->cmq
.csq
.next_to_use
];
223 *desc_to_use
= desc
[handle
];
224 (hw
->cmq
.csq
.next_to_use
)++;
225 if (hw
->cmq
.csq
.next_to_use
== hw
->cmq
.csq
.desc_num
)
226 hw
->cmq
.csq
.next_to_use
= 0;
230 /* Write to hardware */
231 hclge_write_dev(hw
, HCLGE_NIC_CSQ_TAIL_REG
, hw
->cmq
.csq
.next_to_use
);
234 * If the command is sync, wait for the firmware to write back,
235 * if multi descriptors to be sent, use the first one to check
237 if (HCLGE_SEND_SYNC(le16_to_cpu(desc
->flag
))) {
239 if (hclge_cmd_csq_done(hw
)) {
245 } while (timeout
< hw
->cmq
.tx_timeout
);
252 while (handle
< num
) {
253 /* Get the result of hardware write back */
254 desc_to_use
= &hw
->cmq
.csq
.desc
[ntc
];
255 desc
[handle
] = *desc_to_use
;
257 if (likely(!hclge_is_special_opcode(opcode
)))
258 desc_ret
= le16_to_cpu(desc
[handle
].retval
);
260 desc_ret
= le16_to_cpu(desc
[0].retval
);
262 if (desc_ret
== HCLGE_CMD_EXEC_SUCCESS
)
266 hw
->cmq
.last_status
= desc_ret
;
269 if (ntc
== hw
->cmq
.csq
.desc_num
)
274 /* Clean the command send queue */
275 handle
= hclge_cmd_csq_clean(hw
);
277 dev_warn(&hdev
->pdev
->dev
,
278 "cleaned %d, need to clean %d\n", handle
, num
);
281 spin_unlock_bh(&hw
->cmq
.csq
.lock
);
286 static enum hclge_cmd_status
hclge_cmd_query_firmware_version(
287 struct hclge_hw
*hw
, u32
*version
)
289 struct hclge_query_version_cmd
*resp
;
290 struct hclge_desc desc
;
293 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_FW_VER
, 1);
294 resp
= (struct hclge_query_version_cmd
*)desc
.data
;
296 ret
= hclge_cmd_send(hw
, &desc
, 1);
298 *version
= le32_to_cpu(resp
->firmware
);
303 int hclge_cmd_queue_init(struct hclge_dev
*hdev
)
307 /* Setup the queue entries for use cmd queue */
308 hdev
->hw
.cmq
.csq
.desc_num
= HCLGE_NIC_CMQ_DESC_NUM
;
309 hdev
->hw
.cmq
.crq
.desc_num
= HCLGE_NIC_CMQ_DESC_NUM
;
311 /* Setup Tx write back timeout */
312 hdev
->hw
.cmq
.tx_timeout
= HCLGE_CMDQ_TX_TIMEOUT
;
314 /* Setup queue rings */
315 ret
= hclge_alloc_cmd_queue(hdev
, HCLGE_TYPE_CSQ
);
317 dev_err(&hdev
->pdev
->dev
,
318 "CSQ ring setup error %d\n", ret
);
322 ret
= hclge_alloc_cmd_queue(hdev
, HCLGE_TYPE_CRQ
);
324 dev_err(&hdev
->pdev
->dev
,
325 "CRQ ring setup error %d\n", ret
);
331 hclge_free_cmd_desc(&hdev
->hw
.cmq
.csq
);
335 int hclge_cmd_init(struct hclge_dev
*hdev
)
340 hdev
->hw
.cmq
.csq
.next_to_clean
= 0;
341 hdev
->hw
.cmq
.csq
.next_to_use
= 0;
342 hdev
->hw
.cmq
.crq
.next_to_clean
= 0;
343 hdev
->hw
.cmq
.crq
.next_to_use
= 0;
345 /* Setup the lock for command queue */
346 spin_lock_init(&hdev
->hw
.cmq
.csq
.lock
);
347 spin_lock_init(&hdev
->hw
.cmq
.crq
.lock
);
349 hclge_cmd_init_regs(&hdev
->hw
);
350 clear_bit(HCLGE_STATE_CMD_DISABLE
, &hdev
->state
);
352 ret
= hclge_cmd_query_firmware_version(&hdev
->hw
, &version
);
354 dev_err(&hdev
->pdev
->dev
,
355 "firmware version query failed %d\n", ret
);
358 hdev
->fw_version
= version
;
360 dev_info(&hdev
->pdev
->dev
, "The firmware version is %08x\n", version
);
365 static void hclge_destroy_queue(struct hclge_cmq_ring
*ring
)
367 spin_lock(&ring
->lock
);
368 hclge_free_cmd_desc(ring
);
369 spin_unlock(&ring
->lock
);
372 void hclge_destroy_cmd_queue(struct hclge_hw
*hw
)
374 hclge_destroy_queue(&hw
->cmq
.csq
);
375 hclge_destroy_queue(&hw
->cmq
.crq
);