/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
10 #include <linux/dma-mapping.h>
11 #include <linux/slab.h>
12 #include <linux/pci.h>
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/dma-direction.h>
16 #include "hclge_cmd.h"
18 #include "hclge_main.h"
/* True when @ring is the command-send queue (CSQ), as opposed to the CRQ. */
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)

/* Resolve the struct device backing a command-queue ring, for DMA mapping. */
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
24 static int hclge_ring_space(struct hclge_cmq_ring
*ring
)
26 int ntu
= ring
->next_to_use
;
27 int ntc
= ring
->next_to_clean
;
28 int used
= (ntu
- ntc
+ ring
->desc_num
) % ring
->desc_num
;
30 return ring
->desc_num
- used
- 1;
33 static int is_valid_csq_clean_head(struct hclge_cmq_ring
*ring
, int h
)
35 int u
= ring
->next_to_use
;
36 int c
= ring
->next_to_clean
;
38 if (unlikely(h
>= ring
->desc_num
))
41 return u
> c
? (h
> c
&& h
<= u
) : (h
> c
|| h
<= u
);
44 static int hclge_alloc_cmd_desc(struct hclge_cmq_ring
*ring
)
46 int size
= ring
->desc_num
* sizeof(struct hclge_desc
);
48 ring
->desc
= kzalloc(size
, GFP_KERNEL
);
52 ring
->desc_dma_addr
= dma_map_single(cmq_ring_to_dev(ring
), ring
->desc
,
53 size
, DMA_BIDIRECTIONAL
);
54 if (dma_mapping_error(cmq_ring_to_dev(ring
), ring
->desc_dma_addr
)) {
55 ring
->desc_dma_addr
= 0;
64 static void hclge_free_cmd_desc(struct hclge_cmq_ring
*ring
)
66 dma_unmap_single(cmq_ring_to_dev(ring
), ring
->desc_dma_addr
,
67 ring
->desc_num
* sizeof(ring
->desc
[0]),
70 ring
->desc_dma_addr
= 0;
75 static int hclge_alloc_cmd_queue(struct hclge_dev
*hdev
, int ring_type
)
77 struct hclge_hw
*hw
= &hdev
->hw
;
78 struct hclge_cmq_ring
*ring
=
79 (ring_type
== HCLGE_TYPE_CSQ
) ? &hw
->cmq
.csq
: &hw
->cmq
.crq
;
82 ring
->flag
= ring_type
;
85 ret
= hclge_alloc_cmd_desc(ring
);
87 dev_err(&hdev
->pdev
->dev
, "descriptor %s alloc error %d\n",
88 (ring_type
== HCLGE_TYPE_CSQ
) ? "CSQ" : "CRQ", ret
);
95 void hclge_cmd_reuse_desc(struct hclge_desc
*desc
, bool is_read
)
97 desc
->flag
= cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR
| HCLGE_CMD_FLAG_IN
);
99 desc
->flag
|= cpu_to_le16(HCLGE_CMD_FLAG_WR
);
101 desc
->flag
&= cpu_to_le16(~HCLGE_CMD_FLAG_WR
);
104 void hclge_cmd_setup_basic_desc(struct hclge_desc
*desc
,
105 enum hclge_opcode_type opcode
, bool is_read
)
107 memset((void *)desc
, 0, sizeof(struct hclge_desc
));
108 desc
->opcode
= cpu_to_le16(opcode
);
109 desc
->flag
= cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR
| HCLGE_CMD_FLAG_IN
);
112 desc
->flag
|= cpu_to_le16(HCLGE_CMD_FLAG_WR
);
114 desc
->flag
&= cpu_to_le16(~HCLGE_CMD_FLAG_WR
);
117 static void hclge_cmd_config_regs(struct hclge_cmq_ring
*ring
)
119 dma_addr_t dma
= ring
->desc_dma_addr
;
120 struct hclge_dev
*hdev
= ring
->dev
;
121 struct hclge_hw
*hw
= &hdev
->hw
;
123 if (ring
->flag
== HCLGE_TYPE_CSQ
) {
124 hclge_write_dev(hw
, HCLGE_NIC_CSQ_BASEADDR_L_REG
,
126 hclge_write_dev(hw
, HCLGE_NIC_CSQ_BASEADDR_H_REG
,
128 hclge_write_dev(hw
, HCLGE_NIC_CSQ_DEPTH_REG
,
129 (ring
->desc_num
>> HCLGE_NIC_CMQ_DESC_NUM_S
) |
130 HCLGE_NIC_CMQ_ENABLE
);
131 hclge_write_dev(hw
, HCLGE_NIC_CSQ_TAIL_REG
, 0);
132 hclge_write_dev(hw
, HCLGE_NIC_CSQ_HEAD_REG
, 0);
134 hclge_write_dev(hw
, HCLGE_NIC_CRQ_BASEADDR_L_REG
,
136 hclge_write_dev(hw
, HCLGE_NIC_CRQ_BASEADDR_H_REG
,
138 hclge_write_dev(hw
, HCLGE_NIC_CRQ_DEPTH_REG
,
139 (ring
->desc_num
>> HCLGE_NIC_CMQ_DESC_NUM_S
) |
140 HCLGE_NIC_CMQ_ENABLE
);
141 hclge_write_dev(hw
, HCLGE_NIC_CRQ_TAIL_REG
, 0);
142 hclge_write_dev(hw
, HCLGE_NIC_CRQ_HEAD_REG
, 0);
146 static void hclge_cmd_init_regs(struct hclge_hw
*hw
)
148 hclge_cmd_config_regs(&hw
->cmq
.csq
);
149 hclge_cmd_config_regs(&hw
->cmq
.crq
);
152 static int hclge_cmd_csq_clean(struct hclge_hw
*hw
)
154 struct hclge_dev
*hdev
= container_of(hw
, struct hclge_dev
, hw
);
155 struct hclge_cmq_ring
*csq
= &hw
->cmq
.csq
;
156 u16 ntc
= csq
->next_to_clean
;
157 struct hclge_desc
*desc
;
161 desc
= &csq
->desc
[ntc
];
162 head
= hclge_read_dev(hw
, HCLGE_NIC_CSQ_HEAD_REG
);
163 rmb(); /* Make sure head is ready before touch any data */
165 if (!is_valid_csq_clean_head(csq
, head
)) {
166 dev_warn(&hdev
->pdev
->dev
, "wrong head (%d, %d-%d)\n", head
,
167 csq
->next_to_use
, csq
->next_to_clean
);
171 while (head
!= ntc
) {
172 memset(desc
, 0, sizeof(*desc
));
174 if (ntc
== csq
->desc_num
)
176 desc
= &csq
->desc
[ntc
];
179 csq
->next_to_clean
= ntc
;
184 static int hclge_cmd_csq_done(struct hclge_hw
*hw
)
186 u32 head
= hclge_read_dev(hw
, HCLGE_NIC_CSQ_HEAD_REG
);
187 return head
== hw
->cmq
.csq
.next_to_use
;
190 static bool hclge_is_special_opcode(u16 opcode
)
192 /* these commands have several descriptors,
193 * and use the first one to save opcode and return value
195 u16 spec_opcode
[3] = {HCLGE_OPC_STATS_64_BIT
,
196 HCLGE_OPC_STATS_32_BIT
, HCLGE_OPC_STATS_MAC
};
199 for (i
= 0; i
< ARRAY_SIZE(spec_opcode
); i
++) {
200 if (spec_opcode
[i
] == opcode
)
208 * hclge_cmd_send - send command to command queue
209 * @hw: pointer to the hw struct
210 * @desc: prefilled descriptor for describing the command
211 * @num : the number of descriptors to be sent
213 * This is the main send command for command queue, it
214 * sends the queue, cleans the queue, etc
216 int hclge_cmd_send(struct hclge_hw
*hw
, struct hclge_desc
*desc
, int num
)
218 struct hclge_dev
*hdev
= container_of(hw
, struct hclge_dev
, hw
);
219 struct hclge_desc
*desc_to_use
;
220 bool complete
= false;
224 u16 opcode
, desc_ret
;
227 spin_lock_bh(&hw
->cmq
.csq
.lock
);
229 if (num
> hclge_ring_space(&hw
->cmq
.csq
)) {
230 spin_unlock_bh(&hw
->cmq
.csq
.lock
);
235 * Record the location of desc in the ring for this time
236 * which will be use for hardware to write back
238 ntc
= hw
->cmq
.csq
.next_to_use
;
239 opcode
= le16_to_cpu(desc
[0].opcode
);
240 while (handle
< num
) {
241 desc_to_use
= &hw
->cmq
.csq
.desc
[hw
->cmq
.csq
.next_to_use
];
242 *desc_to_use
= desc
[handle
];
243 (hw
->cmq
.csq
.next_to_use
)++;
244 if (hw
->cmq
.csq
.next_to_use
== hw
->cmq
.csq
.desc_num
)
245 hw
->cmq
.csq
.next_to_use
= 0;
249 /* Write to hardware */
250 hclge_write_dev(hw
, HCLGE_NIC_CSQ_TAIL_REG
, hw
->cmq
.csq
.next_to_use
);
253 * If the command is sync, wait for the firmware to write back,
254 * if multi descriptors to be sent, use the first one to check
256 if (HCLGE_SEND_SYNC(le16_to_cpu(desc
->flag
))) {
258 if (hclge_cmd_csq_done(hw
))
262 } while (timeout
< hw
->cmq
.tx_timeout
);
265 if (hclge_cmd_csq_done(hw
)) {
268 while (handle
< num
) {
269 /* Get the result of hardware write back */
270 desc_to_use
= &hw
->cmq
.csq
.desc
[ntc
];
271 desc
[handle
] = *desc_to_use
;
273 if (likely(!hclge_is_special_opcode(opcode
)))
274 desc_ret
= le16_to_cpu(desc
[handle
].retval
);
276 desc_ret
= le16_to_cpu(desc
[0].retval
);
278 if (desc_ret
== HCLGE_CMD_EXEC_SUCCESS
)
282 hw
->cmq
.last_status
= desc_ret
;
285 if (ntc
== hw
->cmq
.csq
.desc_num
)
293 /* Clean the command send queue */
294 handle
= hclge_cmd_csq_clean(hw
);
296 dev_warn(&hdev
->pdev
->dev
,
297 "cleaned %d, need to clean %d\n", handle
, num
);
300 spin_unlock_bh(&hw
->cmq
.csq
.lock
);
305 static enum hclge_cmd_status
hclge_cmd_query_firmware_version(
306 struct hclge_hw
*hw
, u32
*version
)
308 struct hclge_query_version_cmd
*resp
;
309 struct hclge_desc desc
;
312 hclge_cmd_setup_basic_desc(&desc
, HCLGE_OPC_QUERY_FW_VER
, 1);
313 resp
= (struct hclge_query_version_cmd
*)desc
.data
;
315 ret
= hclge_cmd_send(hw
, &desc
, 1);
317 *version
= le32_to_cpu(resp
->firmware
);
322 int hclge_cmd_queue_init(struct hclge_dev
*hdev
)
326 /* Setup the queue entries for use cmd queue */
327 hdev
->hw
.cmq
.csq
.desc_num
= HCLGE_NIC_CMQ_DESC_NUM
;
328 hdev
->hw
.cmq
.crq
.desc_num
= HCLGE_NIC_CMQ_DESC_NUM
;
330 /* Setup Tx write back timeout */
331 hdev
->hw
.cmq
.tx_timeout
= HCLGE_CMDQ_TX_TIMEOUT
;
333 /* Setup queue rings */
334 ret
= hclge_alloc_cmd_queue(hdev
, HCLGE_TYPE_CSQ
);
336 dev_err(&hdev
->pdev
->dev
,
337 "CSQ ring setup error %d\n", ret
);
341 ret
= hclge_alloc_cmd_queue(hdev
, HCLGE_TYPE_CRQ
);
343 dev_err(&hdev
->pdev
->dev
,
344 "CRQ ring setup error %d\n", ret
);
350 hclge_free_cmd_desc(&hdev
->hw
.cmq
.csq
);
354 int hclge_cmd_init(struct hclge_dev
*hdev
)
359 hdev
->hw
.cmq
.csq
.next_to_clean
= 0;
360 hdev
->hw
.cmq
.csq
.next_to_use
= 0;
361 hdev
->hw
.cmq
.crq
.next_to_clean
= 0;
362 hdev
->hw
.cmq
.crq
.next_to_use
= 0;
364 /* Setup the lock for command queue */
365 spin_lock_init(&hdev
->hw
.cmq
.csq
.lock
);
366 spin_lock_init(&hdev
->hw
.cmq
.crq
.lock
);
368 hclge_cmd_init_regs(&hdev
->hw
);
370 ret
= hclge_cmd_query_firmware_version(&hdev
->hw
, &version
);
372 dev_err(&hdev
->pdev
->dev
,
373 "firmware version query failed %d\n", ret
);
376 hdev
->fw_version
= version
;
378 dev_info(&hdev
->pdev
->dev
, "The firmware version is %08x\n", version
);
383 static void hclge_destroy_queue(struct hclge_cmq_ring
*ring
)
385 spin_lock(&ring
->lock
);
386 hclge_free_cmd_desc(ring
);
387 spin_unlock(&ring
->lock
);
390 void hclge_destroy_cmd_queue(struct hclge_hw
*hw
)
392 hclge_destroy_queue(&hw
->cmq
.csq
);
393 hclge_destroy_queue(&hw
->cmq
.crq
);