]>
Commit | Line | Data |
---|---|---|
f2b4a171 | 1 | /* |
2 | * Copyright (c) 2016~2017 Hisilicon Limited. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | */ | |
68c0a5c7 S |
9 | |
10 | #include <linux/dma-mapping.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/pci.h> | |
13 | #include <linux/device.h> | |
14 | #include <linux/err.h> | |
15 | #include <linux/dma-direction.h> | |
16 | #include "hclge_cmd.h" | |
17 | #include "hnae3.h" | |
18 | #include "hclge_main.h" | |
19 | ||
20 | #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) | |
27430c60 | 21 | |
68c0a5c7 S |
22 | #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) |
23 | ||
24 | static int hclge_ring_space(struct hclge_cmq_ring *ring) | |
25 | { | |
26 | int ntu = ring->next_to_use; | |
27 | int ntc = ring->next_to_clean; | |
28 | int used = (ntu - ntc + ring->desc_num) % ring->desc_num; | |
29 | ||
30 | return ring->desc_num - used - 1; | |
31 | } | |
32 | ||
abc07440 HT |
33 | static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h) |
34 | { | |
35 | int u = ring->next_to_use; | |
36 | int c = ring->next_to_clean; | |
37 | ||
38 | if (unlikely(h >= ring->desc_num)) | |
39 | return 0; | |
40 | ||
41 | return u > c ? (h > c && h <= u) : (h > c || h <= u); | |
42 | } | |
43 | ||
68c0a5c7 S |
44 | static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) |
45 | { | |
46 | int size = ring->desc_num * sizeof(struct hclge_desc); | |
47 | ||
139b93ed HT |
48 | ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), |
49 | size, &ring->desc_dma_addr, | |
50 | GFP_KERNEL); | |
68c0a5c7 S |
51 | if (!ring->desc) |
52 | return -ENOMEM; | |
53 | ||
68c0a5c7 S |
54 | return 0; |
55 | } | |
56 | ||
57 | static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) | |
58 | { | |
139b93ed | 59 | int size = ring->desc_num * sizeof(struct hclge_desc); |
68c0a5c7 | 60 | |
139b93ed HT |
61 | if (ring->desc) { |
62 | dma_free_coherent(cmq_ring_to_dev(ring), size, | |
63 | ring->desc, ring->desc_dma_addr); | |
64 | ring->desc = NULL; | |
65 | } | |
68c0a5c7 S |
66 | } |
67 | ||
3efb960f | 68 | static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) |
68c0a5c7 S |
69 | { |
70 | struct hclge_hw *hw = &hdev->hw; | |
71 | struct hclge_cmq_ring *ring = | |
72 | (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; | |
73 | int ret; | |
74 | ||
5724458a | 75 | ring->ring_type = ring_type; |
68c0a5c7 S |
76 | ring->dev = hdev; |
77 | ||
78 | ret = hclge_alloc_cmd_desc(ring); | |
79 | if (ret) { | |
80 | dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n", | |
81 | (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret); | |
82 | return ret; | |
83 | } | |
84 | ||
68c0a5c7 S |
85 | return 0; |
86 | } | |
87 | ||
f7db940a L |
88 | void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) |
89 | { | |
90 | desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); | |
91 | if (is_read) | |
92 | desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); | |
93 | else | |
94 | desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); | |
95 | } | |
96 | ||
68c0a5c7 S |
97 | void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, |
98 | enum hclge_opcode_type opcode, bool is_read) | |
99 | { | |
100 | memset((void *)desc, 0, sizeof(struct hclge_desc)); | |
101 | desc->opcode = cpu_to_le16(opcode); | |
102 | desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); | |
103 | ||
104 | if (is_read) | |
105 | desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); | |
68c0a5c7 S |
106 | } |
107 | ||
/* Program one ring's DMA base address, depth and head/tail pointers into
 * hardware. Used for both the CSQ (command send) and CRQ (command
 * receive) register sets; head/tail are reset to 0 so the ring starts
 * empty.
 */
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		/* 64-bit DMA address split across two 32-bit registers */
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		/* depth register also carries the queue-enable bit */
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
	}
}
136 | ||
137 | static void hclge_cmd_init_regs(struct hclge_hw *hw) | |
138 | { | |
139 | hclge_cmd_config_regs(&hw->cmq.csq); | |
140 | hclge_cmd_config_regs(&hw->cmq.crq); | |
141 | } | |
142 | ||
/* Reclaim CSQ descriptors that hardware has already consumed.
 *
 * Reads the hardware head pointer, validates it against the software
 * ring state, and advances next_to_clean up to it. Returns the number
 * of descriptors cleaned, or 0 when the reported head is outside the
 * range software considers in-flight.
 */
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touch any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		/* bogus head: warn and clean nothing rather than corrupt
		 * the ring indices
		 */
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		return 0;
	}

	/* circular distance from next_to_clean up to the new head */
	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}
163 | ||
164 | static int hclge_cmd_csq_done(struct hclge_hw *hw) | |
165 | { | |
166 | u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); | |
167 | return head == hw->cmq.csq.next_to_use; | |
168 | } | |
169 | ||
170 | static bool hclge_is_special_opcode(u16 opcode) | |
171 | { | |
a070049d HT |
172 | /* these commands have several descriptors, |
173 | * and use the first one to save opcode and return value | |
174 | */ | |
175 | u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT, | |
176 | HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC}; | |
68c0a5c7 S |
177 | int i; |
178 | ||
179 | for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { | |
180 | if (spec_opcode[i] == opcode) | |
181 | return true; | |
182 | } | |
183 | ||
184 | return false; | |
185 | } | |
186 | ||
/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num : the number of descriptors to be sent
 *
 * This is the main send command for command queue, it
 * sends the queue, cleans the queue, etc
 *
 * Returns 0 on success, -EBUSY when the ring lacks space, -EAGAIN when
 * the firmware did not complete in time (or the command was not sent
 * synchronously), -EIO when firmware reports a non-success retval.
 * The whole submit/poll/clean sequence runs under the csq lock (BH
 * disabled), so commands are serialised.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int retval = 0;
	u16 opcode, desc_ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	/* need @num free slots (ring keeps one slot unused) */
	if (num > hclge_ring_space(&hw->cmq.csq)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/**
	 * Record the location of desc in the ring for this time
	 * which will be use for hardware to write back
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	/* copy the caller's descriptors into the ring, wrapping as needed */
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		hw->cmq.csq.next_to_use %= hw->cmq.csq.desc_num;
		handle++;
	}

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/**
	 * If the command is sync, wait for the firmware to write back,
	 * if multi descriptors to be sent, use the first one to check
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		/* busy-poll the head pointer, 1us per step, up to tx_timeout */
		do {
			if (hclge_cmd_csq_done(hw)) {
				complete = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!complete) {
		/* timed out, or command was not sent synchronously */
		retval = -EAGAIN;
	} else {
		handle = 0;
		while (handle < num) {
			/* Get the result of hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			/* special (multi-descriptor) commands keep the return
			 * value in the first descriptor only
			 */
			if (likely(!hclge_is_special_opcode(opcode)))
				desc_ret = le16_to_cpu(desc[handle].retval);
			else
				desc_ret = le16_to_cpu(desc[0].retval);

			/* NOTE: retval reflects only the LAST descriptor's
			 * status; last_status is updated per descriptor
			 */
			if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
				retval = 0;
			else
				retval = -EIO;
			hw->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			ntc %= hw->cmq.csq.desc_num;
		}
	}

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle != num) {
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	}

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}
282 | ||
1db9b1bf YL |
283 | static enum hclge_cmd_status hclge_cmd_query_firmware_version( |
284 | struct hclge_hw *hw, u32 *version) | |
68c0a5c7 | 285 | { |
d44f9b63 | 286 | struct hclge_query_version_cmd *resp; |
68c0a5c7 S |
287 | struct hclge_desc desc; |
288 | int ret; | |
289 | ||
290 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); | |
d44f9b63 | 291 | resp = (struct hclge_query_version_cmd *)desc.data; |
68c0a5c7 S |
292 | |
293 | ret = hclge_cmd_send(hw, &desc, 1); | |
294 | if (!ret) | |
295 | *version = le32_to_cpu(resp->firmware); | |
296 | ||
297 | return ret; | |
298 | } | |
299 | ||
3efb960f | 300 | int hclge_cmd_queue_init(struct hclge_dev *hdev) |
68c0a5c7 | 301 | { |
68c0a5c7 S |
302 | int ret; |
303 | ||
304 | /* Setup the queue entries for use cmd queue */ | |
305 | hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; | |
306 | hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; | |
307 | ||
68c0a5c7 S |
308 | /* Setup Tx write back timeout */ |
309 | hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT; | |
310 | ||
311 | /* Setup queue rings */ | |
3efb960f | 312 | ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ); |
68c0a5c7 S |
313 | if (ret) { |
314 | dev_err(&hdev->pdev->dev, | |
315 | "CSQ ring setup error %d\n", ret); | |
316 | return ret; | |
317 | } | |
318 | ||
3efb960f | 319 | ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ); |
68c0a5c7 S |
320 | if (ret) { |
321 | dev_err(&hdev->pdev->dev, | |
322 | "CRQ ring setup error %d\n", ret); | |
323 | goto err_csq; | |
324 | } | |
325 | ||
3efb960f L |
326 | return 0; |
327 | err_csq: | |
328 | hclge_free_cmd_desc(&hdev->hw.cmq.csq); | |
329 | return ret; | |
330 | } | |
331 | ||
332 | int hclge_cmd_init(struct hclge_dev *hdev) | |
333 | { | |
334 | u32 version; | |
335 | int ret; | |
336 | ||
337 | hdev->hw.cmq.csq.next_to_clean = 0; | |
338 | hdev->hw.cmq.csq.next_to_use = 0; | |
339 | hdev->hw.cmq.crq.next_to_clean = 0; | |
340 | hdev->hw.cmq.crq.next_to_use = 0; | |
341 | ||
342 | /* Setup the lock for command queue */ | |
343 | spin_lock_init(&hdev->hw.cmq.csq.lock); | |
344 | spin_lock_init(&hdev->hw.cmq.crq.lock); | |
345 | ||
68c0a5c7 S |
346 | hclge_cmd_init_regs(&hdev->hw); |
347 | ||
348 | ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); | |
349 | if (ret) { | |
350 | dev_err(&hdev->pdev->dev, | |
351 | "firmware version query failed %d\n", ret); | |
352 | return ret; | |
353 | } | |
354 | hdev->fw_version = version; | |
355 | ||
d7629e74 | 356 | dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); |
68c0a5c7 S |
357 | |
358 | return 0; | |
68c0a5c7 S |
359 | } |
360 | ||
361 | static void hclge_destroy_queue(struct hclge_cmq_ring *ring) | |
362 | { | |
3df6aceb | 363 | spin_lock(&ring->lock); |
68c0a5c7 | 364 | hclge_free_cmd_desc(ring); |
3df6aceb | 365 | spin_unlock(&ring->lock); |
68c0a5c7 S |
366 | } |
367 | ||
368 | void hclge_destroy_cmd_queue(struct hclge_hw *hw) | |
369 | { | |
370 | hclge_destroy_queue(&hw->cmq.csq); | |
371 | hclge_destroy_queue(&hw->cmq.crq); | |
372 | } |