/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)

#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)

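/* Number of free slots in the ring; one descriptor is always left
 * unused so that a full ring can be distinguished from an empty one.
 */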
static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

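/* Validate the head value read back from hardware: it must lie within
 * the currently outstanding region of the ring, modulo wraparound.
 */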
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h >= ring->desc_num))
		return 0;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

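/* Allocate the descriptor array for a ring and map it for DMA in both
 * directions, since the hardware writes results back into the same memory.
 */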
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
					     size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

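/* Set up one command queue ring (CSQ or CRQ) and allocate its descriptors */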
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->flag = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}

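/* Re-arm an already completed descriptor so it can be sent again,
 * setting or clearing the read (WR) flag as requested.
 */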
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}

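/* Write a ring's DMA base address, depth and zeroed head/tail pointers
 * into the corresponding CSQ or CRQ hardware registers.
 */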
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;

	if (ring->flag == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	}
}

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}

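/* Advance next_to_clean up to the hardware head pointer, zeroing the
 * descriptors that were consumed, and return how many were cleaned.
 */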
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u16 ntc = csq->next_to_clean;
	struct hclge_desc *desc;
	int clean = 0;
	u32 head;

	desc = &csq->desc[ntc];
	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		return 0;
	}

	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}

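/* All submitted descriptors are done once the hardware head pointer
 * catches up with the driver's next_to_use.
 */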
static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	/* these commands have several descriptors,
	 * and use the first one to save opcode and return value
	 */
	u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
			      HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it posts the
 * descriptors, polls for completion when the command is synchronous,
 * and cleans the queue afterwards.
 */
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int retval = 0;
	u16 opcode, desc_ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (num > hclge_ring_space(&hw->cmq.csq)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used by hardware for write back
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw))
				break;
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (hclge_cmd_csq_done(hw)) {
		complete = true;
		handle = 0;
		while (handle < num) {
			/* Get the result of hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			if (likely(!hclge_is_special_opcode(opcode)))
				desc_ret = le16_to_cpu(desc[handle].retval);
			else
				desc_ret = le16_to_cpu(desc[0].retval);

			if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
				retval = 0;
			else
				retval = -EIO;
			hw->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		retval = -EAGAIN;

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle != num) {
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	}

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}

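/* Read the firmware version over the command queue. This is the
 * canonical single-descriptor use of hclge_cmd_send(): set up a
 * descriptor, send it, then parse the response from desc.data.
 */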
static enum hclge_cmd_status hclge_cmd_query_firmware_version(
	struct hclge_hw *hw, u32 *version)
{
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_query_version_cmd *)desc.data;

	ret = hclge_cmd_send(hw, &desc, 1);
	if (!ret)
		*version = le32_to_cpu(resp->firmware);

	return ret;
}

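/* Allocate and set up the CSQ and CRQ rings used by the command queue */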
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Set up the queue entries used by the command queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Set up the Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Set up the queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

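/* Reset the ring pointers, program the hardware registers and confirm
 * the firmware responds by querying its version.
 */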
int hclge_cmd_init(struct hclge_dev *hdev)
{
	u32 version;
	int ret;

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	/* Set up the locks for the command queues */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	hclge_cmd_init_regs(&hdev->hw);

	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"firmware version query failed %d\n", ret);
		return ret;
	}
	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;
}

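/* Free a ring's descriptor memory while holding its lock */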
static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
	spin_lock(&ring->lock);
	hclge_free_cmd_desc(ring);
	spin_unlock(&ring->lock);
}

void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
	hclge_destroy_queue(&hw->cmq.csq);
	hclge_destroy_queue(&hw->cmq.crq);
}