drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright (c) 2016-2017 Hisilicon Limited.
3
4#include <linux/etherdevice.h>
5#include <net/rtnetlink.h>
6#include "hclgevf_cmd.h"
7#include "hclgevf_main.h"
8#include "hclge_mbx.h"
9#include "hnae3.h"
10
11#define HCLGEVF_NAME "hclgevf"
12
13static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
14static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
15static struct hnae3_ae_algo ae_algovf;
16
17static const struct pci_device_id ae_algovf_pci_tbl[] = {
18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
20 /* required last entry */
21 {0, }
22};
23
24MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
25
26static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
27 struct hnae3_handle *handle)
28{
29 return container_of(handle, struct hclgevf_dev, nic);
30}
31
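/* Read the per-queue RX/TX packet counters from firmware over the command
 * queue and accumulate them into each TQP's software statistics.
 */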
32static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
33{
34 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
35 struct hnae3_queue *queue;
36 struct hclgevf_desc desc;
37 struct hclgevf_tqp *tqp;
38 int status;
39 int i;
40
41 for (i = 0; i < hdev->num_tqps; i++) {
42 queue = handle->kinfo.tqp[i];
43 tqp = container_of(queue, struct hclgevf_tqp, q);
44 hclgevf_cmd_setup_basic_desc(&desc,
45 HCLGEVF_OPC_QUERY_RX_STATUS,
46 true);
47
48 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
49 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
50 if (status) {
51 dev_err(&hdev->pdev->dev,
52 "Query tqp stat fail, status = %d,queue = %d\n",
53 status, i);
54 return status;
55 }
56 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
57 le32_to_cpu(desc.data[1]);
58
59 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
60 true);
61
62 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
63 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
64 if (status) {
65 dev_err(&hdev->pdev->dev,
66 "Query tqp stat fail, status = %d,queue = %d\n",
67 status, i);
68 return status;
69 }
70 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
71 le32_to_cpu(desc.data[1]);
72 }
73
74 return 0;
75}
76
77static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
78{
79 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
80 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
81 struct hclgevf_tqp *tqp;
82 u64 *buff = data;
83 int i;
84
85 for (i = 0; i < hdev->num_tqps; i++) {
86 tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
87 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
88 }
89 for (i = 0; i < kinfo->num_tqps; i++) {
90 tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
91 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
92 }
93
94 return buff;
95}
96
97static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
98{
99 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
100
101 return hdev->num_tqps * 2;
102}
103
104static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
105{
106 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
107 u8 *buff = data;
108 int i = 0;
109
110 for (i = 0; i < hdev->num_tqps; i++) {
111 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
112 struct hclgevf_tqp, q);
113 snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
114 tqp->index);
115 buff += ETH_GSTRING_LEN;
116 }
117
118 for (i = 0; i < hdev->num_tqps; i++) {
119 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
120 struct hclgevf_tqp, q);
121 snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
122 tqp->index);
123 buff += ETH_GSTRING_LEN;
124 }
125
126 return buff;
127}
128
129static void hclgevf_update_stats(struct hnae3_handle *handle,
130 struct net_device_stats *net_stats)
131{
132 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
133 int status;
134
135 status = hclgevf_tqps_update_stats(handle);
136 if (status)
137 dev_err(&hdev->pdev->dev,
138 "VF update of TQPS stats fail, status = %d.\n",
139 status);
140}
141
142static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
143{
144 if (strset == ETH_SS_TEST)
145 return -EOPNOTSUPP;
146 else if (strset == ETH_SS_STATS)
147 return hclgevf_tqps_get_sset_count(handle, strset);
148
149 return 0;
150}
151
152static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
153 u8 *data)
154{
155 u8 *p = (char *)data;
156
157 if (strset == ETH_SS_STATS)
158 p = hclgevf_tqps_get_strings(handle, p);
159}
160
161static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
162{
163 hclgevf_tqps_get_stats(handle, data);
164}
165
166static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
167{
168 u8 resp_msg;
169 int status;
170
171 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
172 true, &resp_msg, sizeof(u8));
173 if (status) {
174 dev_err(&hdev->pdev->dev,
175 "VF request to get TC info from PF failed %d",
176 status);
177 return status;
178 }
179
180 hdev->hw_tc_map = resp_msg;
181
182 return 0;
183}
184
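/* The PF replies with an 8-byte buffer laid out as four u16 values, copied
 * below in order: num_tqps, rss_size_max, num_desc and rx_buf_len.
 */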
185static int hclge_get_queue_info(struct hclgevf_dev *hdev)
186{
187#define HCLGEVF_TQPS_RSS_INFO_LEN 8
188 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
189 int status;
190
191 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
192 true, resp_msg,
193 HCLGEVF_TQPS_RSS_INFO_LEN);
194 if (status) {
195 dev_err(&hdev->pdev->dev,
196 "VF request to get tqp info from PF failed %d",
197 status);
198 return status;
199 }
200
201 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
202 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
203 memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
204 memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
205
206 return 0;
207}
208
209static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
210{
211 struct hclgevf_tqp *tqp;
212 int i;
213
214 /* if this is an ongoing reset then we need to re-allocate the TQPs
215 * since we cannot assume we would get the same number of TQPs back from PF
216 */
217 if (hclgevf_dev_ongoing_reset(hdev))
218 devm_kfree(&hdev->pdev->dev, hdev->htqp);
219
220 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
221 sizeof(struct hclgevf_tqp), GFP_KERNEL);
222 if (!hdev->htqp)
223 return -ENOMEM;
224
225 tqp = hdev->htqp;
226
227 for (i = 0; i < hdev->num_tqps; i++) {
228 tqp->dev = &hdev->pdev->dev;
229 tqp->index = i;
230
231 tqp->q.ae_algo = &ae_algovf;
232 tqp->q.buf_size = hdev->rx_buf_len;
233 tqp->q.desc_num = hdev->num_desc;
234 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
235 i * HCLGEVF_TQP_REG_SIZE;
236
237 tqp++;
238 }
239
240 return 0;
241}
242
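/* Derive kinfo from the PF-provided config: count the enabled TCs from
 * hw_tc_map, cap rss_size by rss_size_max, and size num_tqps to
 * rss_size * num_tc.
 */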
243static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
244{
245 struct hnae3_handle *nic = &hdev->nic;
246 struct hnae3_knic_private_info *kinfo;
247 u16 new_tqps = hdev->num_tqps;
248 int i;
249
250 kinfo = &nic->kinfo;
251 kinfo->num_tc = 0;
252 kinfo->num_desc = hdev->num_desc;
253 kinfo->rx_buf_len = hdev->rx_buf_len;
254 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
255 if (hdev->hw_tc_map & BIT(i))
256 kinfo->num_tc++;
257
258 kinfo->rss_size
259 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
260 new_tqps = kinfo->rss_size * kinfo->num_tc;
261 kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
262
263 /* if this is an ongoing reset then we need to re-allocate the hnae queues
264 * as well since the number of TQPs from PF might have changed.
265 */
266 if (hclgevf_dev_ongoing_reset(hdev))
267 devm_kfree(&hdev->pdev->dev, kinfo->tqp);
268
269 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
270 sizeof(struct hnae3_queue *), GFP_KERNEL);
271 if (!kinfo->tqp)
272 return -ENOMEM;
273
274 for (i = 0; i < kinfo->num_tqps; i++) {
275 hdev->htqp[i].q.handle = &hdev->nic;
276 hdev->htqp[i].q.tqp_index = i;
277 kinfo->tqp[i] = &hdev->htqp[i].q;
278 }
279
280 return 0;
281}
282
283static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
284{
285 int status;
286 u8 resp_msg;
287
288 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
289 0, false, &resp_msg, sizeof(u8));
290 if (status)
291 dev_err(&hdev->pdev->dev,
292 "VF failed to fetch link status(%d) from PF", status);
293}
294
295void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
296{
297 struct hnae3_handle *rhandle = &hdev->roce;
298 struct hnae3_handle *handle = &hdev->nic;
299 struct hnae3_client *rclient;
300 struct hnae3_client *client;
301
302 client = handle->client;
303 rclient = hdev->roce_client;
304
305 if (link_state != hdev->hw.mac.link) {
306 client->ops->link_status_change(handle, !!link_state);
307 if (rclient && rclient->ops->link_status_change)
308 rclient->ops->link_status_change(rhandle, !!link_state);
309 hdev->hw.mac.link = link_state;
310 }
311}
312
313static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
314{
315 struct hnae3_handle *nic = &hdev->nic;
316 int ret;
317
318 nic->ae_algo = &ae_algovf;
319 nic->pdev = hdev->pdev;
320 nic->numa_node_mask = hdev->numa_node_mask;
321 nic->flags |= HNAE3_SUPPORT_VF;
322
323 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
324 dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
325 hdev->ae_dev->dev_type);
326 return -EINVAL;
327 }
328
329 ret = hclgevf_knic_setup(hdev);
330 if (ret)
331 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
332 ret);
333 return ret;
334}
335
336static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
337{
338 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
339 hdev->num_msi_left += 1;
340 hdev->num_msi_used -= 1;
341}
342
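/* Hand out up to vector_num free MSI-X vectors to the client, skipping
 * vector 0 which is reserved for the misc (mailbox) interrupt.
 */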
343static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
344 struct hnae3_vector_info *vector_info)
345{
346 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
347 struct hnae3_vector_info *vector = vector_info;
348 int alloc = 0;
349 int i, j;
350
351 vector_num = min(hdev->num_msi_left, vector_num);
352
353 for (j = 0; j < vector_num; j++) {
354 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
355 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
356 vector->vector = pci_irq_vector(hdev->pdev, i);
357 vector->io_addr = hdev->hw.io_base +
358 HCLGEVF_VECTOR_REG_BASE +
359 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
360 hdev->vector_status[i] = 0;
361 hdev->vector_irq[i] = vector->vector;
362
363 vector++;
364 alloc++;
365
366 break;
367 }
368 }
369 }
370 hdev->num_msi_left -= alloc;
371 hdev->num_msi_used += alloc;
372
373 return alloc;
374}
375
376static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
377{
378 int i;
379
380 for (i = 0; i < hdev->num_msi; i++)
381 if (vector == hdev->vector_irq[i])
382 return i;
383
384 return -EINVAL;
385}
386
387static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
388{
389 return HCLGEVF_RSS_KEY_SIZE;
390}
391
392static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
393{
394 return HCLGEVF_RSS_IND_TBL_SIZE;
395}
396
397static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
398{
399 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
400 struct hclgevf_rss_indirection_table_cmd *req;
401 struct hclgevf_desc desc;
402 int status;
403 int i, j;
404
405 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
406
407 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
408 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
409 false);
410 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
411 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
412 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
413 req->rss_result[j] =
414 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
415
416 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
417 if (status) {
418 dev_err(&hdev->pdev->dev,
419 "VF failed(=%d) to set RSS indirection table\n",
420 status);
421 return status;
422 }
423 }
424
425 return 0;
426}
427
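/* Program per-TC RSS parameters: each enabled TC gets the same rss_size,
 * encoded as log2 of the next power of two, with consecutive queue offsets.
 */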
428static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
429{
430 struct hclgevf_rss_tc_mode_cmd *req;
431 u16 tc_offset[HCLGEVF_MAX_TC_NUM];
432 u16 tc_valid[HCLGEVF_MAX_TC_NUM];
433 u16 tc_size[HCLGEVF_MAX_TC_NUM];
434 struct hclgevf_desc desc;
435 u16 roundup_size;
436 int status;
437 int i;
438
439 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
440
441 roundup_size = roundup_pow_of_two(rss_size);
442 roundup_size = ilog2(roundup_size);
443
444 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
445 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
446 tc_size[i] = roundup_size;
447 tc_offset[i] = rss_size * i;
448 }
449
450 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
451 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
452 hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
453 (tc_valid[i] & 0x1));
454 hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
455 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
456 hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
457 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
458 }
459 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
460 if (status)
461 dev_err(&hdev->pdev->dev,
462 "VF failed(=%d) to set rss tc mode\n", status);
463
464 return status;
465}
466
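/* Read the RSS hash algorithm and (optionally) the hash key back from
 * hardware; the key is returned in chunks of HCLGEVF_RSS_HASH_KEY_NUM bytes
 * selected via the key_offset field.
 */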
467static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
468 u8 *key)
469{
470 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
471 struct hclgevf_rss_config_cmd *req;
472 int lkup_times = key ? 3 : 1;
473 struct hclgevf_desc desc;
474 int key_offset;
475 int key_size;
476 int status;
477
478 req = (struct hclgevf_rss_config_cmd *)desc.data;
479 lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
480
481 for (key_offset = 0; key_offset < lkup_times; key_offset++) {
482 hclgevf_cmd_setup_basic_desc(&desc,
483 HCLGEVF_OPC_RSS_GENERIC_CONFIG,
484 true);
485 req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
486
487 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
488 if (status) {
489 dev_err(&hdev->pdev->dev,
490 "failed to get hardware RSS cfg, status = %d\n",
491 status);
492 return status;
493 }
494
495 if (key_offset == 2)
496 key_size =
497 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
498 else
499 key_size = HCLGEVF_RSS_HASH_KEY_NUM;
500
501 if (key)
502 memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
503 req->hash_key,
504 key_size);
505 }
506
507 if (hash) {
508 if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
509 *hash = ETH_RSS_HASH_TOP;
510 else
511 *hash = ETH_RSS_HASH_UNKNOWN;
512 }
513
514 return 0;
515}
516
517static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
518 u8 *hfunc)
519{
520 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
521 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
522 int i;
523
524 if (indir)
525 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
526 indir[i] = rss_cfg->rss_indirection_tbl[i];
527
528 return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
529}
530
531static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
532 const u8 *key, const u8 hfunc)
533{
534 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
535 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
536 int i;
537
538 /* update the shadow RSS table with user specified qids */
539 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
540 rss_cfg->rss_indirection_tbl[i] = indir[i];
541
542 /* update the hardware */
543 return hclgevf_set_rss_indir_table(hdev);
544}
545
546static int hclgevf_get_tc_size(struct hnae3_handle *handle)
547{
548 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
549 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
550
551 return rss_cfg->rss_size;
552}
553
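/* Ask the PF (via the VF-to-PF mailbox) to map or unmap every ring in
 * ring_chain to the given vector; ring entries are packed into the mailbox
 * message and flushed whenever the message is full or the chain ends.
 */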
554static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
555 int vector_id,
556 struct hnae3_ring_chain_node *ring_chain)
557 {
558 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
559 struct hnae3_ring_chain_node *node;
560 struct hclge_mbx_vf_to_pf_cmd *req;
561 struct hclgevf_desc desc;
562 int i = 0;
563 int status;
564 u8 type;
565
566 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
567
568 for (node = ring_chain; node; node = node->next) {
569 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
570 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;
571
572 if (i == 0) {
573 hclgevf_cmd_setup_basic_desc(&desc,
574 HCLGEVF_OPC_MBX_VF_TO_PF,
575 false);
576 type = en ?
577 HCLGE_MBX_MAP_RING_TO_VECTOR :
578 HCLGE_MBX_UNMAP_RING_TO_VECTOR;
579 req->msg[0] = type;
580 req->msg[1] = vector_id;
581 }
582
583 req->msg[idx_offset] =
584 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
585 req->msg[idx_offset + 1] = node->tqp_index;
586 req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
587 HNAE3_RING_GL_IDX_M,
588 HNAE3_RING_GL_IDX_S);
589
590 i++;
591 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
592 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
593 HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
594 !node->next) {
595 req->msg[2] = i;
596
597 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
598 if (status) {
599 dev_err(&hdev->pdev->dev,
600 "Map TQP fail, status is %d.\n",
601 status);
602 return status;
603 }
604 i = 0;
605 hclgevf_cmd_setup_basic_desc(&desc,
606 HCLGEVF_OPC_MBX_VF_TO_PF,
607 false);
608 req->msg[0] = type;
609 req->msg[1] = vector_id;
610 }
611 }
612
613 return 0;
614}
615
616static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
617 struct hnae3_ring_chain_node *ring_chain)
618{
619 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
620 int vector_id;
621
622 vector_id = hclgevf_get_vector_index(hdev, vector);
623 if (vector_id < 0) {
624 dev_err(&handle->pdev->dev,
625 "Get vector index fail. ret =%d\n", vector_id);
626 return vector_id;
627 }
628
629 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
630}
631
632static int hclgevf_unmap_ring_from_vector(
633 struct hnae3_handle *handle,
634 int vector,
635 struct hnae3_ring_chain_node *ring_chain)
636{
637 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
638 int ret, vector_id;
639
640 vector_id = hclgevf_get_vector_index(hdev, vector);
641 if (vector_id < 0) {
642 dev_err(&handle->pdev->dev,
643 "Get vector index fail. ret =%d\n", vector_id);
644 return vector_id;
645 }
646
647 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
648 if (ret)
649 dev_err(&handle->pdev->dev,
650 "Unmap ring from vector fail. vector=%d, ret =%d\n",
651 vector_id,
652 ret);
653
654 return ret;
655}
656
657static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
658{
659 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
660
661 hclgevf_free_vector(hdev, vector);
662
663 return 0;
664}
665
666static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
667 bool en_uc_pmc, bool en_mc_pmc)
668{
669 struct hclge_mbx_vf_to_pf_cmd *req;
670 struct hclgevf_desc desc;
671 int status;
672
673 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
674
675 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
676 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
677 req->msg[1] = en_uc_pmc ? 1 : 0;
678 req->msg[2] = en_mc_pmc ? 1 : 0;
679
680 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
681 if (status)
682 dev_err(&hdev->pdev->dev,
683 "Set promisc mode fail, status is %d.\n", status);
684
685 return status;
686}
687
688static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
689 bool en_uc_pmc, bool en_mc_pmc)
690{
691 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
692
693 hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
694}
695
696static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
697 int stream_id, bool enable)
698{
699 struct hclgevf_cfg_com_tqp_queue_cmd *req;
700 struct hclgevf_desc desc;
701 int status;
702
703 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
704
705 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
706 false);
707 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
708 req->stream_id = cpu_to_le16(stream_id);
709 req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
710
711 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
712 if (status)
713 dev_err(&hdev->pdev->dev,
714 "TQP enable fail, status =%d.\n", status);
715
716 return status;
717}
718
719static int hclgevf_get_queue_id(struct hnae3_queue *queue)
720{
721 struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
722
723 return tqp->index;
724}
725
726static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
727{
728 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
729 struct hnae3_queue *queue;
730 struct hclgevf_tqp *tqp;
731 int i;
732
733 for (i = 0; i < hdev->num_tqps; i++) {
734 queue = handle->kinfo.tqp[i];
735 tqp = container_of(queue, struct hclgevf_tqp, q);
736 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
737 }
738}
739
740static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
741{
742 u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
743 int ret;
744
745 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
746 HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
747 NULL, 0, true, &resp_msg, sizeof(u8));
748
749 if (ret) {
750 dev_err(&hdev->pdev->dev,
751 "Read mta type fail, ret=%d.\n", ret);
752 return ret;
753 }
754
755 if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
756 dev_err(&hdev->pdev->dev,
757 "Read mta type invalid, resp=%d.\n", resp_msg);
758 return -EINVAL;
759 }
760
761 hdev->mta_mac_sel_type = resp_msg;
762
763 return 0;
764}
765
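/* Hash a MAC address to a 12-bit MTA table index using the first two bytes
 * of the address, shifted according to the PF-reported mta_mac_sel_type.
 */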
766static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
767 const u8 *addr)
768{
769 u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
770 u16 high_val = addr[1] | (addr[0] << 8);
771
772 return (high_val >> rsh) & 0xfff;
773}
774
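/* Push the multicast table (MTA) status bitmap to the PF in mailbox-sized
 * chunks: byte 0 carries the chunk index plus an end-of-table flag in bit 7,
 * the remaining 13 bytes carry the bitmap itself.
 */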
775static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
776 unsigned long *status)
777{
778#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
779#define HCLGEVF_MTA_STATUS_MSG_BITS \
780 (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
781#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
782 (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
783 u16 tbl_cnt;
784 u16 tbl_idx;
785 u8 msg_cnt;
786 u8 msg_idx;
787 int ret;
788
789 msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
790 HCLGEVF_MTA_STATUS_MSG_BITS);
791 tbl_idx = 0;
792 msg_idx = 0;
793 while (msg_cnt--) {
794 u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
795 u8 *p = &msg[1];
796 u8 msg_ofs;
797 u8 msg_bit;
798
799 memset(msg, 0, sizeof(msg));
800
801 /* set index field */
802 msg[0] = 0x7F & msg_idx;
803
804 /* set end flag field */
805 if (msg_cnt == 0) {
806 msg[0] |= 0x80;
807 tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
808 } else {
809 tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
810 }
811
812 /* set status field */
813 msg_ofs = 0;
814 msg_bit = 0;
815 while (tbl_cnt--) {
816 if (test_bit(tbl_idx, status))
817 p[msg_ofs] |= BIT(msg_bit);
818
819 tbl_idx++;
820
821 msg_bit++;
822 if (msg_bit == BITS_PER_BYTE) {
823 msg_bit = 0;
824 msg_ofs++;
825 }
826 }
827
828 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
829 HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
830 msg, sizeof(msg), false, NULL, 0);
831 if (ret)
832 break;
833
834 msg_idx++;
835 }
836
837 return ret;
838}
839
840static int hclgevf_update_mta_status(struct hnae3_handle *handle)
841{
842 unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
843 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
844 struct net_device *netdev = hdev->nic.kinfo.netdev;
845 struct netdev_hw_addr *ha;
846 u16 tbl_idx;
847
848 /* clear status */
849 memset(mta_status, 0, sizeof(mta_status));
850
851 /* update status from mc addr list */
852 netdev_for_each_mc_addr(ha, netdev) {
853 tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
854 set_bit(tbl_idx, mta_status);
855 }
856
857 return hclgevf_do_update_mta_status(hdev, mta_status);
858}
859
860static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
861{
862 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
863
864 ether_addr_copy(p, hdev->hw.mac.mac_addr);
865}
866
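/* Change the VF MAC address through the PF: the mailbox message carries the
 * new address followed by the old one, and the subcode selects ADD for the
 * first assignment or MODIFY for later changes.
 */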
867static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
868 bool is_first)
869{
870 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
871 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
872 u8 *new_mac_addr = (u8 *)p;
873 u8 msg_data[ETH_ALEN * 2];
874 u16 subcode;
875 int status;
876
877 ether_addr_copy(msg_data, new_mac_addr);
878 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
879
880 subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
881 HCLGE_MBX_MAC_VLAN_UC_MODIFY;
882
883 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
884 subcode, msg_data, ETH_ALEN * 2,
885 true, NULL, 0);
886 if (!status)
887 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
888
889 return status;
890}
891
892static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
893 const unsigned char *addr)
894{
895 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
896
897 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
898 HCLGE_MBX_MAC_VLAN_UC_ADD,
899 addr, ETH_ALEN, false, NULL, 0);
900}
901
902static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
903 const unsigned char *addr)
904{
905 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
906
907 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
908 HCLGE_MBX_MAC_VLAN_UC_REMOVE,
909 addr, ETH_ALEN, false, NULL, 0);
910}
911
912static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
913 const unsigned char *addr)
914{
915 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
916
917 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
918 HCLGE_MBX_MAC_VLAN_MC_ADD,
919 addr, ETH_ALEN, false, NULL, 0);
920}
921
922static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
923 const unsigned char *addr)
924{
925 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
926
927 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
928 HCLGE_MBX_MAC_VLAN_MC_REMOVE,
929 addr, ETH_ALEN, false, NULL, 0);
930}
931
932static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
933 __be16 proto, u16 vlan_id,
934 bool is_kill)
935{
936#define HCLGEVF_VLAN_MBX_MSG_LEN 5
937 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
938 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
939
940 if (vlan_id > 4095)
941 return -EINVAL;
942
943 if (proto != htons(ETH_P_8021Q))
944 return -EPROTONOSUPPORT;
945
946 msg_data[0] = is_kill;
947 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
948 memcpy(&msg_data[3], &proto, sizeof(proto));
949 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
950 HCLGE_MBX_VLAN_FILTER, msg_data,
951 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
952}
953
954static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
955{
956 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
957 u8 msg_data;
958
959 msg_data = enable ? 1 : 0;
960 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
961 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
962 1, false, NULL, 0);
963}
964
965static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
966{
967 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
968 u8 msg_data[2];
969 int ret;
970
971 memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
972
973 /* disable vf queue before send queue reset msg to PF */
974 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
975 if (ret)
976 return;
977
978 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
979 2, true, NULL, 0);
980}
981
982static int hclgevf_notify_client(struct hclgevf_dev *hdev,
983 enum hnae3_reset_notify_type type)
984{
985 struct hnae3_client *client = hdev->nic_client;
986 struct hnae3_handle *handle = &hdev->nic;
987
988 if (!client->ops->reset_notify)
989 return -EOPNOTSUPP;
990
991 return client->ops->reset_notify(handle, type);
992}
993
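/* Poll the HCLGEVF_FUN_RST_ING register until hardware clears the
 * reset-in-progress bit, giving up after
 * HCLGEVF_RESET_WAIT_CNT * HCLGEVF_RESET_WAIT_MS milliseconds.
 */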
994static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
995{
996#define HCLGEVF_RESET_WAIT_MS 500
997#define HCLGEVF_RESET_WAIT_CNT 20
998 u32 val, cnt = 0;
999
1000 /* wait to check the hardware reset completion status */
1001 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
1002 while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
1003 (cnt < HCLGEVF_RESET_WAIT_CNT)) {
1004 msleep(HCLGEVF_RESET_WAIT_MS);
1005 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
1006 cnt++;
1007 }
1008
1009 /* hardware completion status should be available by this time */
1010 if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
1011 dev_warn(&hdev->pdev->dev,
1012 "could'nt get reset done status from h/w, timeout!\n");
1013 return -EBUSY;
1014 }
1015
1016 /* we will wait a bit more to let the reset of the stack complete. This
1017 * might happen in case reset assertion was made by PF. Yes, this also
1018 * means we might end up waiting a bit more even for VF reset.
1019 */
1020 msleep(5000);
1021
1022 return 0;
1023}
1024
1025static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1026{
1027 int ret;
1028
1029 /* uninitialize the nic client */
1030 hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1031
1032 /* re-initialize the hclge device */
1033 ret = hclgevf_init_hdev(hdev);
1034 if (ret) {
1035 dev_err(&hdev->pdev->dev,
1036 "hclge device re-init failed, VF is disabled!\n");
1037 return ret;
1038 }
1039
1040 /* bring up the nic client again */
1041 hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1042
1043 return 0;
1044}
1045
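/* Full VF reset path: stop the NIC client, wait for hardware to finish the
 * function reset, then rebuild the hclgevf device and bring the client back
 * up.
 */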
1046static int hclgevf_reset(struct hclgevf_dev *hdev)
1047{
1048 int ret;
1049
1050 rtnl_lock();
1051
1052 /* bring down the nic to stop any ongoing TX/RX */
1053 hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1054
1055 /* check if VF could successfully fetch the hardware reset completion
1056 * status from the hardware
1057 */
1058 ret = hclgevf_reset_wait(hdev);
1059 if (ret) {
1060 /* can't do much in this situation, will disable VF */
1061 dev_err(&hdev->pdev->dev,
1062 "VF failed(=%d) to fetch H/W reset completion status\n",
1063 ret);
1064
1065 dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
1066 hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1067
1068 rtnl_unlock();
1069 return ret;
1070 }
1071
1072 /* now, re-initialize the nic client and ae device */
1073 ret = hclgevf_reset_stack(hdev);
1074 if (ret)
1075 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1076
1077 /* bring up the nic to enable TX/RX again */
1078 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1079
1080 rtnl_unlock();
1081
1082 return ret;
1083}
1084
1085static int hclgevf_do_reset(struct hclgevf_dev *hdev)
1086{
1087 int status;
1088 u8 respmsg;
1089
1090 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
1091 0, false, &respmsg, sizeof(u8));
1092 if (status)
1093 dev_err(&hdev->pdev->dev,
1094 "VF reset request to PF failed(=%d)\n", status);
1095
1096 return status;
1097}
1098
1099static void hclgevf_reset_event(struct hnae3_handle *handle)
1100{
1101 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1102
1103 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
1104
1105 handle->reset_level = HNAE3_VF_RESET;
1106
1107 /* reset of this VF requested */
1108 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1109 hclgevf_reset_task_schedule(hdev);
1110
1111 handle->last_reset_time = jiffies;
1112}
1113
1114static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1115{
1116 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1117
1118 return hdev->fw_version;
1119}
1120
1121static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1122{
1123 struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1124
1125 vector->vector_irq = pci_irq_vector(hdev->pdev,
1126 HCLGEVF_MISC_VECTOR_NUM);
1127 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1128 /* vector status always valid for Vector 0 */
1129 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1130 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1131
1132 hdev->num_msi_left -= 1;
1133 hdev->num_msi_used += 1;
1134}
1135
1136void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1137{
1138 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
1139 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
1140 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1141 schedule_work(&hdev->rst_service_task);
1142 }
1143}
1144
85a86c48 1145void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
5bc3f5f3 1146{
85a86c48
SM
1147 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
1148 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
1149 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
5bc3f5f3 1150 schedule_work(&hdev->mbx_service_task);
85a86c48 1151 }
5bc3f5f3
SM
1152}
1153
1154static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
1155{
1156 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
1157 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
1158 schedule_work(&hdev->service_task);
1159}
1160
1161static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
1162{
1163 /* if we have any pending mailbox event then schedule the mbx task */
1164 if (hdev->mbx_event_pending)
1165 hclgevf_mbx_task_schedule(hdev);
1166
1167 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
1168 hclgevf_reset_task_schedule(hdev);
1169}
1170
1171static void hclgevf_service_timer(struct timer_list *t)
1172{
1173 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
1174
1175 mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
1176
1177 hclgevf_task_schedule(hdev);
1178}
1179
1180static void hclgevf_reset_service_task(struct work_struct *work)
1181{
1182 struct hclgevf_dev *hdev =
1183 container_of(work, struct hclgevf_dev, rst_service_task);
1184 int ret;
1185
1186 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1187 return;
1188
1189 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1190
1191 if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1192 &hdev->reset_state)) {
1193 /* PF has intimated that it is about to reset the hardware.
1194 * We now have to poll & check if hardware has actually completed
1195 * the reset sequence. On hardware reset completion, VF needs to
1196 * reset the client and ae device.
1197 */
1198 hdev->reset_attempts = 0;
1199
1200 ret = hclgevf_reset(hdev);
1201 if (ret)
1202 dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
1203 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1204 &hdev->reset_state)) {
1205 /* we could be here when either of the below happens:
1206 * 1. reset was initiated due to a watchdog timeout caused by
1207 * a. IMP was earlier reset and our TX got choked down,
1208 * which resulted in the watchdog reacting and inducing VF
1209 * reset. This also means our cmdq would be unreliable.
1210 * b. a problem in TX due to some other lower layer (for example,
1211 * the link layer not functioning properly, etc.)
1212 * 2. VF reset might have been initiated due to some config
1213 * change.
1214 *
1215 * NOTE: There's no clearer way to detect the above cases than to
1216 * react to the response of PF for this reset request. PF will ack
1217 * the 1b and 2 cases, but we will not get any intimation about 1a
1218 * from PF as cmdq would be in an unreliable state i.e. mailbox
1219 * communication between PF and VF would be broken.
1220 */
1221
1222 /* if we are never getting into the pending state it means either:
1223 * 1. PF is not receiving our request which could be due to IMP
1224 * reset
1225 * 2. PF is screwed
1226 * We cannot do much for 2., but as a first check we can try
1227 * resetting our PCIe + stack and see if it alleviates the problem.
1228 */
1229 if (hdev->reset_attempts > 3) {
1230 /* prepare for full reset of stack + pcie interface */
1231 hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
1232
1233 /* "defer" schedule the reset task again */
1234 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1235 } else {
1236 hdev->reset_attempts++;
1237
1238 /* request PF for resetting this VF via mailbox */
1239 ret = hclgevf_do_reset(hdev);
1240 if (ret)
1241 dev_warn(&hdev->pdev->dev,
1242 "VF rst fail, stack will call\n");
1243 }
1244 }
1245
1246 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1247}
1248
1249static void hclgevf_mailbox_service_task(struct work_struct *work)
1250{
1251 struct hclgevf_dev *hdev;
1252
1253 hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
1254
1255 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1256 return;
1257
1258 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1259
1260 hclgevf_mbx_async_handler(hdev);
1261
1262 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1263}
1264
1265static void hclgevf_service_task(struct work_struct *work)
1266{
1267 struct hclgevf_dev *hdev;
1268
1269 hdev = container_of(work, struct hclgevf_dev, service_task);
1270
1271 /* request the link status from the PF. PF would be able to tell VF
1272 * about such updates in future so we might remove this later
1273 */
1274 hclgevf_request_link_info(hdev);
1275
1276 hclgevf_deferred_task_schedule(hdev);
1277
1278 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1279}
1280
1281static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1282{
1283 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
1284}
1285
1286static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
1287{
1288 u32 cmdq_src_reg;
1289
1290 /* fetch the events from their corresponding regs */
1291 cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
1292 HCLGEVF_VECTOR0_CMDQ_SRC_REG);
1293
1294 /* check for vector0 mailbox(=CMDQ RX) event source */
1295 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
1296 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1297 *clearval = cmdq_src_reg;
1298 return true;
1299 }
1300
1301 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
1302
1303 return false;
1304}
1305
1306static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1307{
1308 writel(en ? 1 : 0, vector->addr);
1309}
1310
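/* Misc (vector 0) interrupt handler: mask the vector, handle any mailbox
 * event from the PF, clear the event source and unmask the vector again.
 */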
1311static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
1312{
1313 struct hclgevf_dev *hdev = data;
1314 u32 clearval;
1315
1316 hclgevf_enable_vector(&hdev->misc_vector, false);
1317 if (!hclgevf_check_event_cause(hdev, &clearval))
1318 goto skip_sched;
1319
1320 hclgevf_mbx_handler(hdev);
1321
1322 hclgevf_clear_event_cause(hdev, clearval);
1323
1324skip_sched:
1325 hclgevf_enable_vector(&hdev->misc_vector, true);
1326
1327 return IRQ_HANDLED;
1328}
1329
1330static int hclgevf_configure(struct hclgevf_dev *hdev)
1331{
1332 int ret;
1333
1334 /* get queue configuration from PF */
1335 ret = hclge_get_queue_info(hdev);
1336 if (ret)
1337 return ret;
1338 /* get tc configuration from PF */
1339 return hclgevf_get_tc_info(hdev);
1340}
1341
1342static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
1343{
1344 struct pci_dev *pdev = ae_dev->pdev;
1345 struct hclgevf_dev *hdev = ae_dev->priv;
1346
1347 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1348 if (!hdev)
1349 return -ENOMEM;
1350
1351 hdev->pdev = pdev;
1352 hdev->ae_dev = ae_dev;
1353 ae_dev->priv = hdev;
1354
1355 return 0;
1356}
1357
1358static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
1359{
1360 struct hnae3_handle *roce = &hdev->roce;
1361 struct hnae3_handle *nic = &hdev->nic;
1362
1363 roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
1364
1365 if (hdev->num_msi_left < roce->rinfo.num_vectors ||
1366 hdev->num_msi_left == 0)
1367 return -EINVAL;
1368
1369 roce->rinfo.base_vector =
1370 hdev->vector_status[hdev->num_msi_used];
1371
1372 roce->rinfo.netdev = nic->kinfo.netdev;
1373 roce->rinfo.roce_io_base = hdev->hw.io_base;
1374
1375 roce->pdev = nic->pdev;
1376 roce->ae_algo = nic->ae_algo;
1377 roce->numa_node_mask = nic->numa_node_mask;
1378
1379 return 0;
1380}
1381
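/* Set up default RSS: spread the indirection table round-robin across
 * rss_size_max queues and program the per-TC mode in hardware.
 */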
1382static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
1383{
1384 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1385 int i, ret;
1386
1387 rss_cfg->rss_size = hdev->rss_size_max;
1388
1389 /* Initialize RSS indirect table for each vport */
1390 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
1391 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
1392
1393 ret = hclgevf_set_rss_indir_table(hdev);
1394 if (ret)
1395 return ret;
1396
1397 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
1398}
1399
1400static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
1401{
1402 /* other vlan config(like, VLAN TX/RX offload) would also be added
1403 * here later
1404 */
1405 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
1406 false);
1407}
1408
1409static int hclgevf_ae_start(struct hnae3_handle *handle)
1410{
1411 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1412 int i, queue_id;
1413
1414 for (i = 0; i < handle->kinfo.num_tqps; i++) {
1415 /* ring enable */
1416 queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1417 if (queue_id < 0) {
1418 dev_warn(&hdev->pdev->dev,
1419 "Get invalid queue id, ignore it\n");
1420 continue;
1421 }
1422
1423 hclgevf_tqp_enable(hdev, queue_id, 0, true);
1424 }
1425
1426 /* reset tqp stats */
1427 hclgevf_reset_tqp_stats(handle);
1428
1429 hclgevf_request_link_info(hdev);
1430
1431 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1432 mod_timer(&hdev->service_timer, jiffies + HZ);
1433
1434 return 0;
1435}
1436
1437static void hclgevf_ae_stop(struct hnae3_handle *handle)
1438{
1439 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1440 int i, queue_id;
1441
1442 for (i = 0; i < hdev->num_tqps; i++) {
1443 /* Ring disable */
1444 queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1445 if (queue_id < 0) {
1446 dev_warn(&hdev->pdev->dev,
1447 "Get invalid queue id, ignore it\n");
1448 continue;
1449 }
1450
1451 hclgevf_tqp_enable(hdev, queue_id, 0, false);
1452 }
1453
1454 /* reset tqp stats */
1455 hclgevf_reset_tqp_stats(handle);
1456 del_timer_sync(&hdev->service_timer);
1457 cancel_work_sync(&hdev->service_task);
1458 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1459 hclgevf_update_link_status(hdev, 0);
1460}
1461
1462static void hclgevf_state_init(struct hclgevf_dev *hdev)
1463{
1464 /* if this is an ongoing reset then skip this initialization */
1465 if (hclgevf_dev_ongoing_reset(hdev))
1466 return;
1467
1468 /* setup tasks for the MBX */
1469 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
1470 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1471 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1472
1473 /* setup tasks for service timer */
1474 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
1475
1476 INIT_WORK(&hdev->service_task, hclgevf_service_task);
1477 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1478
1479 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
1480
1481 mutex_init(&hdev->mbx_resp.mbx_mutex);
1482
1483 /* bring the device down */
1484 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1485}
1486
1487static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
1488{
1489 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1490
1491 if (hdev->service_timer.function)
1492 del_timer_sync(&hdev->service_timer);
1493 if (hdev->service_task.func)
1494 cancel_work_sync(&hdev->service_task);
1495 if (hdev->mbx_service_task.func)
1496 cancel_work_sync(&hdev->mbx_service_task);
1497 if (hdev->rst_service_task.func)
1498 cancel_work_sync(&hdev->rst_service_task);
1499
1500 mutex_destroy(&hdev->mbx_resp.mbx_mutex);
1501}
1502
1503static int hclgevf_init_msi(struct hclgevf_dev *hdev)
1504{
1505 struct pci_dev *pdev = hdev->pdev;
1506 int vectors;
1507 int i;
1508
1509 /* if this is an ongoing reset then skip this initialization */
1510 if (hclgevf_dev_ongoing_reset(hdev))
1511 return 0;
1512
1513 hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
1514
1515 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1516 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1517 if (vectors < 0) {
1518 dev_err(&pdev->dev,
1519 "failed(%d) to allocate MSI/MSI-X vectors\n",
1520 vectors);
1521 return vectors;
1522 }
1523 if (vectors < hdev->num_msi)
1524 dev_warn(&hdev->pdev->dev,
1525 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1526 hdev->num_msi, vectors);
1527
1528 hdev->num_msi = vectors;
1529 hdev->num_msi_left = vectors;
1530 hdev->base_msi_vector = pdev->irq;
1531
1532 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1533 sizeof(u16), GFP_KERNEL);
1534 if (!hdev->vector_status) {
1535 pci_free_irq_vectors(pdev);
1536 return -ENOMEM;
1537 }
1538
1539 for (i = 0; i < hdev->num_msi; i++)
1540 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
1541
1542 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1543 sizeof(int), GFP_KERNEL);
1544 if (!hdev->vector_irq) {
1545 pci_free_irq_vectors(pdev);
1546 return -ENOMEM;
1547 }
1548
1549 return 0;
1550}
1551
1552static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
1553{
1554 struct pci_dev *pdev = hdev->pdev;
1555
1556 pci_free_irq_vectors(pdev);
1557}
1558
1559static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
1560{
1561 int ret = 0;
1562
1563 /* if this is an ongoing reset then skip this initialization */
1564 if (hclgevf_dev_ongoing_reset(hdev))
1565 return 0;
1566
1567 hclgevf_get_misc_vector(hdev);
1568
1569 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
1570 0, "hclgevf_cmd", hdev);
1571 if (ret) {
1572 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
1573 hdev->misc_vector.vector_irq);
1574 return ret;
1575 }
1576
1577 hclgevf_clear_event_cause(hdev, 0);
1578
1579 /* enable misc. vector(vector 0) */
1580 hclgevf_enable_vector(&hdev->misc_vector, true);
1581
1582 return ret;
1583}
1584
1585static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
1586{
1587 /* disable misc vector(vector 0) */
1588 hclgevf_enable_vector(&hdev->misc_vector, false);
1589 synchronize_irq(hdev->misc_vector.vector_irq);
1590 free_irq(hdev->misc_vector.vector_irq, hdev);
1591 hclgevf_free_vector(hdev, 0);
1592}
1593
a263ec71 1594static int hclgevf_init_instance(struct hclgevf_dev *hdev,
1595 struct hnae3_client *client)
5bc3f5f3
SM
1596{
1597 int ret;
1598
1599 switch (client->type) {
1600 case HNAE3_CLIENT_KNIC:
1601 hdev->nic_client = client;
1602 hdev->nic.client = client;
1603
1604 ret = client->ops->init_instance(&hdev->nic);
1605 if (ret)
1606 return ret;
1607
1608 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
1609 struct hnae3_client *rc = hdev->roce_client;
1610
1611 ret = hclgevf_init_roce_base_info(hdev);
1612 if (ret)
1613 return ret;
1614 ret = rc->ops->init_instance(&hdev->roce);
1615 if (ret)
1616 return ret;
1617 }
1618 break;
1619 case HNAE3_CLIENT_UNIC:
1620 hdev->nic_client = client;
1621 hdev->nic.client = client;
1622
1623 ret = client->ops->init_instance(&hdev->nic);
1624 if (ret)
1625 return ret;
1626 break;
1627 case HNAE3_CLIENT_ROCE:
1628 if (hnae3_dev_roce_supported(hdev)) {
1629 hdev->roce_client = client;
1630 hdev->roce.client = client;
1631 }
1632
1633 if (hdev->roce_client && hdev->nic_client) {
1634 ret = hclgevf_init_roce_base_info(hdev);
1635 if (ret)
1636 return ret;
1637
1638 ret = client->ops->init_instance(&hdev->roce);
1639 if (ret)
1640 return ret;
1641 }
1642 }
1643
1644 return 0;
1645}
1646
a263ec71 1647static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
1648 struct hnae3_client *client)
5bc3f5f3
SM
1649{
1650 /* un-init roce, if it exists */
1651 if (hdev->roce_client)
1652 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
1653
1654 /* un-init nic/unic, if this was not called by roce client */
1655 if ((client->ops->uninit_instance) &&
1656 (client->type != HNAE3_CLIENT_ROCE))
1657 client->ops->uninit_instance(&hdev->nic, 0);
1658}
1659
a263ec71 1660static int hclgevf_register_client(struct hnae3_client *client,
1661 struct hnae3_ae_dev *ae_dev)
1662{
1663 struct hclgevf_dev *hdev = ae_dev->priv;
1664
1665 return hclgevf_init_instance(hdev, client);
1666}
1667
1668static void hclgevf_unregister_client(struct hnae3_client *client,
1669 struct hnae3_ae_dev *ae_dev)
1670{
1671 struct hclgevf_dev *hdev = ae_dev->priv;
1672
1673 hclgevf_uninit_instance(hdev, client);
1674}
1675
1676static int hclgevf_pci_init(struct hclgevf_dev *hdev)
1677{
1678 struct pci_dev *pdev = hdev->pdev;
1679 struct hclgevf_hw *hw;
1680 int ret;
1681
1682 /* check if we need to skip the initialization of pci. This will happen if
1683 * the device is undergoing VF reset. Otherwise, we would need to
1684 * re-initialize the pci interface again, i.e. when the device is not going
1685 * through *any* reset or is actually undergoing a full reset.
1686 */
1687 if (hclgevf_dev_ongoing_reset(hdev))
1688 return 0;
1689
1690 ret = pci_enable_device(pdev);
1691 if (ret) {
1692 dev_err(&pdev->dev, "failed to enable PCI device\n");
1693 return ret;
1694 }
1695
1696 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1697 if (ret) {
1698 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
1699 goto err_disable_device;
1700 }
1701
1702 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
1703 if (ret) {
1704 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
1705 goto err_disable_device;
1706 }
1707
1708 pci_set_master(pdev);
1709 hw = &hdev->hw;
1710 hw->hdev = hdev;
1711 hw->io_base = pci_iomap(pdev, 2, 0);
1712 if (!hw->io_base) {
1713 dev_err(&pdev->dev, "can't map configuration register space\n");
1714 ret = -ENOMEM;
1715 goto err_clr_master;
1716 }
1717
1718 return 0;
1719
1720err_clr_master:
1721 pci_clear_master(pdev);
1722 pci_release_regions(pdev);
1723err_disable_device:
1724 pci_disable_device(pdev);
1725
1726 return ret;
1727}
1728
1729static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
1730{
1731 struct pci_dev *pdev = hdev->pdev;
1732
1733 pci_iounmap(pdev, hdev->hw.io_base);
1734 pci_clear_master(pdev);
1735 pci_release_regions(pdev);
1736 pci_disable_device(pdev);
1737}
1738
1f05a70d 1739static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
5bc3f5f3 1740{
1f05a70d 1741 struct pci_dev *pdev = hdev->pdev;
5bc3f5f3
SM
1742 int ret;
1743
1744 /* check if device is undergoing a full reset (i.e. pcie as well) */
1745 if (hclgevf_dev_ongoing_full_reset(hdev)) {
1746 dev_warn(&pdev->dev, "device is undergoing a full reset\n");
1747 hclgevf_uninit_hdev(hdev);
1748 }
1749
1750 ret = hclgevf_pci_init(hdev);
1751 if (ret) {
1752 dev_err(&pdev->dev, "PCI initialization failed\n");
1753 return ret;
1754 }
1755
1756 ret = hclgevf_init_msi(hdev);
1757 if (ret) {
1758 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
1759 goto err_irq_init;
1760 }
1761
1762 hclgevf_state_init(hdev);
1763
1764 ret = hclgevf_cmd_init(hdev);
1765 if (ret)
1766 goto err_cmd_init;
1767
1768 ret = hclgevf_misc_irq_init(hdev);
1769 if (ret) {
1770 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
1771 ret);
1772 goto err_misc_irq_init;
1773 }
1774
1775 ret = hclgevf_configure(hdev);
1776 if (ret) {
1777 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
1778 goto err_config;
1779 }
1780
1781 ret = hclgevf_alloc_tqps(hdev);
1782 if (ret) {
1783 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
1784 goto err_config;
1785 }
1786
1787 ret = hclgevf_set_handle_info(hdev);
1788 if (ret) {
1789 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
1790 goto err_config;
1791 }
1792
1793 /* Initialize mta type for this VF */
1794 ret = hclgevf_cfg_func_mta_type(hdev);
1795 if (ret) {
1796 dev_err(&hdev->pdev->dev,
1797 "failed(%d) to initialize MTA type\n", ret);
1798 goto err_config;
1799 }
1800
1801 /* Initialize RSS for this VF */
1802 ret = hclgevf_rss_init_hw(hdev);
1803 if (ret) {
1804 dev_err(&hdev->pdev->dev,
1805 "failed(%d) to initialize RSS\n", ret);
1806 goto err_config;
1807 }
1808
1809 ret = hclgevf_init_vlan_config(hdev);
1810 if (ret) {
1811 dev_err(&hdev->pdev->dev,
1812 "failed(%d) to initialize VLAN config\n", ret);
1813 goto err_config;
1814 }
1815
1816 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
1817
1818 return 0;
1819
1820err_config:
1821 hclgevf_misc_irq_uninit(hdev);
1822err_misc_irq_init:
1823 hclgevf_cmd_uninit(hdev);
1824err_cmd_init:
1825 hclgevf_state_uninit(hdev);
1826 hclgevf_uninit_msi(hdev);
1827err_irq_init:
1828 hclgevf_pci_uninit(hdev);
1829 return ret;
1830}
1831
1f05a70d 1832static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
5bc3f5f3 1833{
5bc3f5f3 1834 hclgevf_state_uninit(hdev);
f4d51c27
YL
1835 hclgevf_misc_irq_uninit(hdev);
1836 hclgevf_cmd_uninit(hdev);
1837 hclgevf_uninit_msi(hdev);
1838 hclgevf_pci_uninit(hdev);
1839}
1840
1841static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
1842{
1843 struct pci_dev *pdev = ae_dev->pdev;
1844 int ret;
1845
1846 ret = hclgevf_alloc_hdev(ae_dev);
1847 if (ret) {
1848 dev_err(&pdev->dev, "hclge device allocation failed\n");
1849 return ret;
1850 }
1851
1852 ret = hclgevf_init_hdev(ae_dev->priv);
1853 if (ret)
1854 dev_err(&pdev->dev, "hclge device initialization failed\n");
1855
1856 return ret;
1857}
1858
1859static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
1860{
1861 struct hclgevf_dev *hdev = ae_dev->priv;
1862
1863 hclgevf_uninit_hdev(hdev);
1864 ae_dev->priv = NULL;
1865}
1866
1867static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
1868{
1869 struct hnae3_handle *nic = &hdev->nic;
1870 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1871
1872 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
1873}
1874
1875/**
1876 * hclgevf_get_channels - Get the current channels enabled and max supported.
1877 * @handle: hardware information for network interface
1878 * @ch: ethtool channels structure
1879 *
1880 * We don't support separate tx and rx queues as channels. The other count
1881 * represents how many queues are being used for control. max_combined counts
1882 * how many queue pairs we can support. They may not be mapped 1 to 1 with
1883 * q_vectors since we support a lot more queue pairs than q_vectors.
1884 **/
1885static void hclgevf_get_channels(struct hnae3_handle *handle,
1886 struct ethtool_channels *ch)
1887{
1888 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1889
1890 ch->max_combined = hclgevf_get_max_channels(hdev);
1891 ch->other_count = 0;
1892 ch->max_other = 0;
1893 ch->combined_count = hdev->num_tqps;
1894}
1895
1896static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
1897 u16 *free_tqps, u16 *max_rss_size)
1898{
1899 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1900
1901 *free_tqps = 0;
1902 *max_rss_size = hdev->rss_size_max;
1903}
1904
1905static int hclgevf_get_status(struct hnae3_handle *handle)
1906{
1907 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1908
1909 return hdev->hw.mac.link;
1910}
1911
1912static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
1913 u8 *auto_neg, u32 *speed,
1914 u8 *duplex)
1915{
1916 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1917
1918 if (speed)
1919 *speed = hdev->hw.mac.speed;
1920 if (duplex)
1921 *duplex = hdev->hw.mac.duplex;
1922 if (auto_neg)
1923 *auto_neg = AUTONEG_DISABLE;
1924}
1925
1926void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
1927 u8 duplex)
1928{
1929 hdev->hw.mac.speed = speed;
1930 hdev->hw.mac.duplex = duplex;
1931}
1932
1933static const struct hnae3_ae_ops hclgevf_ops = {
1934 .init_ae_dev = hclgevf_init_ae_dev,
1935 .uninit_ae_dev = hclgevf_uninit_ae_dev,
1936 .init_client_instance = hclgevf_register_client,
1937 .uninit_client_instance = hclgevf_unregister_client,
1938 .start = hclgevf_ae_start,
1939 .stop = hclgevf_ae_stop,
1940 .map_ring_to_vector = hclgevf_map_ring_to_vector,
1941 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
1942 .get_vector = hclgevf_get_vector,
1943 .put_vector = hclgevf_put_vector,
1944 .reset_queue = hclgevf_reset_tqp,
1945 .set_promisc_mode = hclgevf_set_promisc_mode,
1946 .get_mac_addr = hclgevf_get_mac_addr,
1947 .set_mac_addr = hclgevf_set_mac_addr,
1948 .add_uc_addr = hclgevf_add_uc_addr,
1949 .rm_uc_addr = hclgevf_rm_uc_addr,
1950 .add_mc_addr = hclgevf_add_mc_addr,
1951 .rm_mc_addr = hclgevf_rm_mc_addr,
1952 .update_mta_status = hclgevf_update_mta_status,
1953 .get_stats = hclgevf_get_stats,
1954 .update_stats = hclgevf_update_stats,
1955 .get_strings = hclgevf_get_strings,
1956 .get_sset_count = hclgevf_get_sset_count,
1957 .get_rss_key_size = hclgevf_get_rss_key_size,
1958 .get_rss_indir_size = hclgevf_get_rss_indir_size,
1959 .get_rss = hclgevf_get_rss,
1960 .set_rss = hclgevf_set_rss,
1961 .get_tc_size = hclgevf_get_tc_size,
1962 .get_fw_version = hclgevf_get_fw_version,
1963 .set_vlan_filter = hclgevf_set_vlan_filter,
1964 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
1965 .reset_event = hclgevf_reset_event,
1966 .get_channels = hclgevf_get_channels,
1967 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
1968 .get_status = hclgevf_get_status,
1969 .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
1970};
1971
1972static struct hnae3_ae_algo ae_algovf = {
1973 .ops = &hclgevf_ops,
1974 .name = HCLGEVF_NAME,
1975 .pdev_id_table = ae_algovf_pci_tbl,
1976};
1977
1978static int hclgevf_init(void)
1979{
1980 pr_info("%s is initializing\n", HCLGEVF_NAME);
1981
1982 hnae3_register_ae_algo(&ae_algovf);
1983
1984 return 0;
1985}
1986
1987static void hclgevf_exit(void)
1988{
1989 hnae3_unregister_ae_algo(&ae_algovf);
1990}
1991module_init(hclgevf_init);
1992module_exit(hclgevf_exit);
1993
1994MODULE_LICENSE("GPL");
1995MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
1996MODULE_DESCRIPTION("HCLGEVF Driver");
1997MODULE_VERSION(HCLGEVF_MOD_VERSION);