drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/etherdevice.h>
5 #include <linux/iopoll.h>
6 #include <net/rtnetlink.h>
7 #include "hclgevf_cmd.h"
8 #include "hclgevf_main.h"
9 #include "hclge_mbx.h"
10 #include "hnae3.h"
11
12 #define HCLGEVF_NAME "hclgevf"
13
14 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
15 static struct hnae3_ae_algo ae_algovf;
16
17 static const struct pci_device_id ae_algovf_pci_tbl[] = {
18 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
19 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
20 /* required last entry */
21 {0, }
22 };
23
24 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
25
26 static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
27 HCLGEVF_CMDQ_TX_ADDR_H_REG,
28 HCLGEVF_CMDQ_TX_DEPTH_REG,
29 HCLGEVF_CMDQ_TX_TAIL_REG,
30 HCLGEVF_CMDQ_TX_HEAD_REG,
31 HCLGEVF_CMDQ_RX_ADDR_L_REG,
32 HCLGEVF_CMDQ_RX_ADDR_H_REG,
33 HCLGEVF_CMDQ_RX_DEPTH_REG,
34 HCLGEVF_CMDQ_RX_TAIL_REG,
35 HCLGEVF_CMDQ_RX_HEAD_REG,
36 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
37 HCLGEVF_CMDQ_INTR_STS_REG,
38 HCLGEVF_CMDQ_INTR_EN_REG,
39 HCLGEVF_CMDQ_INTR_GEN_REG};
40
41 static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
42 HCLGEVF_RST_ING,
43 HCLGEVF_GRO_EN_REG};
44
45 static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
46 HCLGEVF_RING_RX_ADDR_H_REG,
47 HCLGEVF_RING_RX_BD_NUM_REG,
48 HCLGEVF_RING_RX_BD_LENGTH_REG,
49 HCLGEVF_RING_RX_MERGE_EN_REG,
50 HCLGEVF_RING_RX_TAIL_REG,
51 HCLGEVF_RING_RX_HEAD_REG,
52 HCLGEVF_RING_RX_FBD_NUM_REG,
53 HCLGEVF_RING_RX_OFFSET_REG,
54 HCLGEVF_RING_RX_FBD_OFFSET_REG,
55 HCLGEVF_RING_RX_STASH_REG,
56 HCLGEVF_RING_RX_BD_ERR_REG,
57 HCLGEVF_RING_TX_ADDR_L_REG,
58 HCLGEVF_RING_TX_ADDR_H_REG,
59 HCLGEVF_RING_TX_BD_NUM_REG,
60 HCLGEVF_RING_TX_PRIORITY_REG,
61 HCLGEVF_RING_TX_TC_REG,
62 HCLGEVF_RING_TX_MERGE_EN_REG,
63 HCLGEVF_RING_TX_TAIL_REG,
64 HCLGEVF_RING_TX_HEAD_REG,
65 HCLGEVF_RING_TX_FBD_NUM_REG,
66 HCLGEVF_RING_TX_OFFSET_REG,
67 HCLGEVF_RING_TX_EBD_NUM_REG,
68 HCLGEVF_RING_TX_EBD_OFFSET_REG,
69 HCLGEVF_RING_TX_BD_ERR_REG,
70 HCLGEVF_RING_EN_REG};
71
72 static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
73 HCLGEVF_TQP_INTR_GL0_REG,
74 HCLGEVF_TQP_INTR_GL1_REG,
75 HCLGEVF_TQP_INTR_GL2_REG,
76 HCLGEVF_TQP_INTR_RL_REG};
77
78 static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
79 struct hnae3_handle *handle)
80 {
81 return container_of(handle, struct hclgevf_dev, nic);
82 }
83
84 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
85 {
86 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
87 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
88 struct hclgevf_desc desc;
89 struct hclgevf_tqp *tqp;
90 int status;
91 int i;
92
93 for (i = 0; i < kinfo->num_tqps; i++) {
94 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
95 hclgevf_cmd_setup_basic_desc(&desc,
96 HCLGEVF_OPC_QUERY_RX_STATUS,
97 true);
98
99 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
100 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
101 if (status) {
102 dev_err(&hdev->pdev->dev,
103 "Query tqp stat fail, status = %d,queue = %d\n",
104 status, i);
105 return status;
106 }
107 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
108 le32_to_cpu(desc.data[1]);
109
110 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
111 true);
112
113 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
114 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
115 if (status) {
116 dev_err(&hdev->pdev->dev,
117 "Query tqp stat fail, status = %d,queue = %d\n",
118 status, i);
119 return status;
120 }
121 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
122 le32_to_cpu(desc.data[1]);
123 }
124
125 return 0;
126 }
127
128 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
129 {
130 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
131 struct hclgevf_tqp *tqp;
132 u64 *buff = data;
133 int i;
134
135 for (i = 0; i < kinfo->num_tqps; i++) {
136 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
137 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
138 }
139 for (i = 0; i < kinfo->num_tqps; i++) {
140 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
141 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
142 }
143
144 return buff;
145 }
146
147 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
148 {
149 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
150
151 return kinfo->num_tqps * 2;
152 }
153
154 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
155 {
156 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
157 u8 *buff = data;
158 int i = 0;
159
160 for (i = 0; i < kinfo->num_tqps; i++) {
161 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
162 struct hclgevf_tqp, q);
163 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
164 tqp->index);
165 buff += ETH_GSTRING_LEN;
166 }
167
168 for (i = 0; i < kinfo->num_tqps; i++) {
169 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
170 struct hclgevf_tqp, q);
171 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
172 tqp->index);
173 buff += ETH_GSTRING_LEN;
174 }
175
176 return buff;
177 }
178
179 static void hclgevf_update_stats(struct hnae3_handle *handle,
180 struct net_device_stats *net_stats)
181 {
182 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
183 int status;
184
185 status = hclgevf_tqps_update_stats(handle);
186 if (status)
187 dev_err(&hdev->pdev->dev,
188 "VF update of TQPS stats fail, status = %d.\n",
189 status);
190 }
191
192 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
193 {
194 if (strset == ETH_SS_TEST)
195 return -EOPNOTSUPP;
196 else if (strset == ETH_SS_STATS)
197 return hclgevf_tqps_get_sset_count(handle, strset);
198
199 return 0;
200 }
201
202 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
203 u8 *data)
204 {
205 u8 *p = (char *)data;
206
207 if (strset == ETH_SS_STATS)
208 p = hclgevf_tqps_get_strings(handle, p);
209 }
210
211 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
212 {
213 hclgevf_tqps_get_stats(handle, data);
214 }
215
216 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
217 {
218 u8 resp_msg;
219 int status;
220
221 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
222 true, &resp_msg, sizeof(u8));
223 if (status) {
224 dev_err(&hdev->pdev->dev,
225 "VF request to get TC info from PF failed %d",
226 status);
227 return status;
228 }
229
230 hdev->hw_tc_map = resp_msg;
231
232 return 0;
233 }
234
235 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
236 {
237 #define HCLGEVF_TQPS_RSS_INFO_LEN 8
238 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
239 int status;
240
241 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
242 true, resp_msg,
243 HCLGEVF_TQPS_RSS_INFO_LEN);
244 if (status) {
245 dev_err(&hdev->pdev->dev,
246 "VF request to get tqp info from PF failed %d",
247 status);
248 return status;
249 }
250
251 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
252 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
253 memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
254 memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
255
256 return 0;
257 }
258
259 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
260 {
261 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
262 u8 msg_data[2], resp_data[2];
263 u16 qid_in_pf = 0;
264 int ret;
265
266 memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
267
268 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
269 2, true, resp_data, 2);
270 if (!ret)
271 qid_in_pf = *(u16 *)resp_data;
272
273 return qid_in_pf;
274 }
275
276 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
277 {
278 struct hclgevf_tqp *tqp;
279 int i;
280
281 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
282 sizeof(struct hclgevf_tqp), GFP_KERNEL);
283 if (!hdev->htqp)
284 return -ENOMEM;
285
286 tqp = hdev->htqp;
287
288 for (i = 0; i < hdev->num_tqps; i++) {
289 tqp->dev = &hdev->pdev->dev;
290 tqp->index = i;
291
292 tqp->q.ae_algo = &ae_algovf;
293 tqp->q.buf_size = hdev->rx_buf_len;
294 tqp->q.desc_num = hdev->num_desc;
295 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
296 i * HCLGEVF_TQP_REG_SIZE;
297
298 tqp++;
299 }
300
301 return 0;
302 }
303
304 static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
305 {
306 struct hnae3_handle *nic = &hdev->nic;
307 struct hnae3_knic_private_info *kinfo;
308 u16 new_tqps = hdev->num_tqps;
309 int i;
310
311 kinfo = &nic->kinfo;
312 kinfo->num_tc = 0;
313 kinfo->num_desc = hdev->num_desc;
314 kinfo->rx_buf_len = hdev->rx_buf_len;
315 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
316 if (hdev->hw_tc_map & BIT(i))
317 kinfo->num_tc++;
318
319 kinfo->rss_size
320 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
321 new_tqps = kinfo->rss_size * kinfo->num_tc;
322 kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
323
324 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
325 sizeof(struct hnae3_queue *), GFP_KERNEL);
326 if (!kinfo->tqp)
327 return -ENOMEM;
328
329 for (i = 0; i < kinfo->num_tqps; i++) {
330 hdev->htqp[i].q.handle = &hdev->nic;
331 hdev->htqp[i].q.tqp_index = i;
332 kinfo->tqp[i] = &hdev->htqp[i].q;
333 }
334
335 return 0;
336 }
337
338 static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
339 {
340 int status;
341 u8 resp_msg;
342
343 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
344 0, false, &resp_msg, sizeof(u8));
345 if (status)
346 dev_err(&hdev->pdev->dev,
347 "VF failed to fetch link status(%d) from PF", status);
348 }
349
350 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
351 {
352 struct hnae3_handle *handle = &hdev->nic;
353 struct hnae3_client *client;
354
355 client = handle->client;
356
357 link_state =
358 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
359
360 if (link_state != hdev->hw.mac.link) {
361 client->ops->link_status_change(handle, !!link_state);
362 hdev->hw.mac.link = link_state;
363 }
364 }
365
366 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
367 {
368 struct hnae3_handle *nic = &hdev->nic;
369 int ret;
370
371 nic->ae_algo = &ae_algovf;
372 nic->pdev = hdev->pdev;
373 nic->numa_node_mask = hdev->numa_node_mask;
374 nic->flags |= HNAE3_SUPPORT_VF;
375
376 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
377 dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
378 hdev->ae_dev->dev_type);
379 return -EINVAL;
380 }
381
382 ret = hclgevf_knic_setup(hdev);
383 if (ret)
384 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
385 ret);
386 return ret;
387 }
388
389 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
390 {
391 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
392 dev_warn(&hdev->pdev->dev,
393 "vector(vector_id %d) has been freed.\n", vector_id);
394 return;
395 }
396
397 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
398 hdev->num_msi_left += 1;
399 hdev->num_msi_used -= 1;
400 }
401
402 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
403 struct hnae3_vector_info *vector_info)
404 {
405 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
406 struct hnae3_vector_info *vector = vector_info;
407 int alloc = 0;
408 int i, j;
409
410 vector_num = min(hdev->num_msi_left, vector_num);
411
412 for (j = 0; j < vector_num; j++) {
413 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
414 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
415 vector->vector = pci_irq_vector(hdev->pdev, i);
416 vector->io_addr = hdev->hw.io_base +
417 HCLGEVF_VECTOR_REG_BASE +
418 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
419 hdev->vector_status[i] = 0;
420 hdev->vector_irq[i] = vector->vector;
421
422 vector++;
423 alloc++;
424
425 break;
426 }
427 }
428 }
429 hdev->num_msi_left -= alloc;
430 hdev->num_msi_used += alloc;
431
432 return alloc;
433 }
434
435 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
436 {
437 int i;
438
439 for (i = 0; i < hdev->num_msi; i++)
440 if (vector == hdev->vector_irq[i])
441 return i;
442
443 return -EINVAL;
444 }
445
446 static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
447 const u8 hfunc, const u8 *key)
448 {
449 struct hclgevf_rss_config_cmd *req;
450 struct hclgevf_desc desc;
451 int key_offset;
452 int key_size;
453 int ret;
454
455 req = (struct hclgevf_rss_config_cmd *)desc.data;
456
457 for (key_offset = 0; key_offset < 3; key_offset++) {
458 hclgevf_cmd_setup_basic_desc(&desc,
459 HCLGEVF_OPC_RSS_GENERIC_CONFIG,
460 false);
461
462 req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
463 req->hash_config |=
464 (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
465
466 if (key_offset == 2)
467 key_size =
468 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
469 else
470 key_size = HCLGEVF_RSS_HASH_KEY_NUM;
471
472 memcpy(req->hash_key,
473 key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
474
475 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
476 if (ret) {
477 dev_err(&hdev->pdev->dev,
478 "Configure RSS config fail, status = %d\n",
479 ret);
480 return ret;
481 }
482 }
483
484 return 0;
485 }
486
487 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
488 {
489 return HCLGEVF_RSS_KEY_SIZE;
490 }
491
492 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
493 {
494 return HCLGEVF_RSS_IND_TBL_SIZE;
495 }
496
497 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
498 {
499 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
500 struct hclgevf_rss_indirection_table_cmd *req;
501 struct hclgevf_desc desc;
502 int status;
503 int i, j;
504
505 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
506
507 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
508 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
509 false);
510 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
511 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
512 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
513 req->rss_result[j] =
514 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
515
516 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
517 if (status) {
518 dev_err(&hdev->pdev->dev,
519 "VF failed(=%d) to set RSS indirection table\n",
520 status);
521 return status;
522 }
523 }
524
525 return 0;
526 }
527
528 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
529 {
530 struct hclgevf_rss_tc_mode_cmd *req;
531 u16 tc_offset[HCLGEVF_MAX_TC_NUM];
532 u16 tc_valid[HCLGEVF_MAX_TC_NUM];
533 u16 tc_size[HCLGEVF_MAX_TC_NUM];
534 struct hclgevf_desc desc;
535 u16 roundup_size;
536 int status;
537 int i;
538
539 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
540
541 roundup_size = roundup_pow_of_two(rss_size);
542 roundup_size = ilog2(roundup_size);
543
544 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
545 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
546 tc_size[i] = roundup_size;
547 tc_offset[i] = rss_size * i;
548 }
549
550 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
551 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
552 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
553 (tc_valid[i] & 0x1));
554 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
555 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
556 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
557 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
558 }
559 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
560 if (status)
561 dev_err(&hdev->pdev->dev,
562 "VF failed(=%d) to set rss tc mode\n", status);
563
564 return status;
565 }
566
567 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
568 u8 *hfunc)
569 {
570 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
571 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
572 int i;
573
574 if (handle->pdev->revision >= 0x21) {
575 /* Get hash algorithm */
576 if (hfunc) {
577 switch (rss_cfg->hash_algo) {
578 case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
579 *hfunc = ETH_RSS_HASH_TOP;
580 break;
581 case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
582 *hfunc = ETH_RSS_HASH_XOR;
583 break;
584 default:
585 *hfunc = ETH_RSS_HASH_UNKNOWN;
586 break;
587 }
588 }
589
590 /* Get the RSS Key required by the user */
591 if (key)
592 memcpy(key, rss_cfg->rss_hash_key,
593 HCLGEVF_RSS_KEY_SIZE);
594 }
595
596 if (indir)
597 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
598 indir[i] = rss_cfg->rss_indirection_tbl[i];
599
600 return 0;
601 }
602
603 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
604 const u8 *key, const u8 hfunc)
605 {
606 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
607 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
608 int ret, i;
609
610 if (handle->pdev->revision >= 0x21) {
611 /* Set the RSS Hash Key if specified by the user */
612 if (key) {
613 switch (hfunc) {
614 case ETH_RSS_HASH_TOP:
615 rss_cfg->hash_algo =
616 HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
617 break;
618 case ETH_RSS_HASH_XOR:
619 rss_cfg->hash_algo =
620 HCLGEVF_RSS_HASH_ALGO_SIMPLE;
621 break;
622 case ETH_RSS_HASH_NO_CHANGE:
623 break;
624 default:
625 return -EINVAL;
626 }
627
628 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
629 key);
630 if (ret)
631 return ret;
632
633 /* Update the shadow RSS key with the user specified key */
634 memcpy(rss_cfg->rss_hash_key, key,
635 HCLGEVF_RSS_KEY_SIZE);
636 }
637 }
638
639 /* update the shadow RSS table with user specified qids */
640 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
641 rss_cfg->rss_indirection_tbl[i] = indir[i];
642
643 /* update the hardware */
644 return hclgevf_set_rss_indir_table(hdev);
645 }
646
647 static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
648 {
649 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
650
651 if (nfc->data & RXH_L4_B_2_3)
652 hash_sets |= HCLGEVF_D_PORT_BIT;
653 else
654 hash_sets &= ~HCLGEVF_D_PORT_BIT;
655
656 if (nfc->data & RXH_IP_SRC)
657 hash_sets |= HCLGEVF_S_IP_BIT;
658 else
659 hash_sets &= ~HCLGEVF_S_IP_BIT;
660
661 if (nfc->data & RXH_IP_DST)
662 hash_sets |= HCLGEVF_D_IP_BIT;
663 else
664 hash_sets &= ~HCLGEVF_D_IP_BIT;
665
666 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
667 hash_sets |= HCLGEVF_V_TAG_BIT;
668
669 return hash_sets;
670 }
671
672 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
673 struct ethtool_rxnfc *nfc)
674 {
675 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
676 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
677 struct hclgevf_rss_input_tuple_cmd *req;
678 struct hclgevf_desc desc;
679 u8 tuple_sets;
680 int ret;
681
682 if (handle->pdev->revision == 0x20)
683 return -EOPNOTSUPP;
684
685 if (nfc->data &
686 ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
687 return -EINVAL;
688
689 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
690 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
691
692 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
693 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
694 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
695 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
696 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
697 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
698 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
699 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
700
701 tuple_sets = hclgevf_get_rss_hash_bits(nfc);
702 switch (nfc->flow_type) {
703 case TCP_V4_FLOW:
704 req->ipv4_tcp_en = tuple_sets;
705 break;
706 case TCP_V6_FLOW:
707 req->ipv6_tcp_en = tuple_sets;
708 break;
709 case UDP_V4_FLOW:
710 req->ipv4_udp_en = tuple_sets;
711 break;
712 case UDP_V6_FLOW:
713 req->ipv6_udp_en = tuple_sets;
714 break;
715 case SCTP_V4_FLOW:
716 req->ipv4_sctp_en = tuple_sets;
717 break;
718 case SCTP_V6_FLOW:
719 if ((nfc->data & RXH_L4_B_0_1) ||
720 (nfc->data & RXH_L4_B_2_3))
721 return -EINVAL;
722
723 req->ipv6_sctp_en = tuple_sets;
724 break;
725 case IPV4_FLOW:
726 req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
727 break;
728 case IPV6_FLOW:
729 req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
730 break;
731 default:
732 return -EINVAL;
733 }
734
735 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
736 if (ret) {
737 dev_err(&hdev->pdev->dev,
738 "Set rss tuple fail, status = %d\n", ret);
739 return ret;
740 }
741
742 rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
743 rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
744 rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
745 rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
746 rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
747 rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
748 rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
749 rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
750 return 0;
751 }
752
753 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
754 struct ethtool_rxnfc *nfc)
755 {
756 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
757 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
758 u8 tuple_sets;
759
760 if (handle->pdev->revision == 0x20)
761 return -EOPNOTSUPP;
762
763 nfc->data = 0;
764
765 switch (nfc->flow_type) {
766 case TCP_V4_FLOW:
767 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
768 break;
769 case UDP_V4_FLOW:
770 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
771 break;
772 case TCP_V6_FLOW:
773 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
774 break;
775 case UDP_V6_FLOW:
776 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
777 break;
778 case SCTP_V4_FLOW:
779 tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
780 break;
781 case SCTP_V6_FLOW:
782 tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
783 break;
784 case IPV4_FLOW:
785 case IPV6_FLOW:
786 tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
787 break;
788 default:
789 return -EINVAL;
790 }
791
792 if (!tuple_sets)
793 return 0;
794
795 if (tuple_sets & HCLGEVF_D_PORT_BIT)
796 nfc->data |= RXH_L4_B_2_3;
797 if (tuple_sets & HCLGEVF_S_PORT_BIT)
798 nfc->data |= RXH_L4_B_0_1;
799 if (tuple_sets & HCLGEVF_D_IP_BIT)
800 nfc->data |= RXH_IP_DST;
801 if (tuple_sets & HCLGEVF_S_IP_BIT)
802 nfc->data |= RXH_IP_SRC;
803
804 return 0;
805 }
806
807 static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
808 struct hclgevf_rss_cfg *rss_cfg)
809 {
810 struct hclgevf_rss_input_tuple_cmd *req;
811 struct hclgevf_desc desc;
812 int ret;
813
814 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
815
816 req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
817
818 req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
819 req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
820 req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
821 req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
822 req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
823 req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
824 req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
825 req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
826
827 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
828 if (ret)
829 dev_err(&hdev->pdev->dev,
830 "Configure rss input fail, status = %d\n", ret);
831 return ret;
832 }
833
834 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
835 {
836 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
837 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
838
839 return rss_cfg->rss_size;
840 }
841
842 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
843 int vector_id,
844 struct hnae3_ring_chain_node *ring_chain)
845 {
846 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
847 struct hnae3_ring_chain_node *node;
848 struct hclge_mbx_vf_to_pf_cmd *req;
849 struct hclgevf_desc desc;
850 int i = 0;
851 int status;
852 u8 type;
853
854 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
855
856 for (node = ring_chain; node; node = node->next) {
857 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
858 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;
859
860 if (i == 0) {
861 hclgevf_cmd_setup_basic_desc(&desc,
862 HCLGEVF_OPC_MBX_VF_TO_PF,
863 false);
864 type = en ?
865 HCLGE_MBX_MAP_RING_TO_VECTOR :
866 HCLGE_MBX_UNMAP_RING_TO_VECTOR;
867 req->msg[0] = type;
868 req->msg[1] = vector_id;
869 }
870
871 req->msg[idx_offset] =
872 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
873 req->msg[idx_offset + 1] = node->tqp_index;
874 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
875 HNAE3_RING_GL_IDX_M,
876 HNAE3_RING_GL_IDX_S);
877
878 i++;
879 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
880 HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
881 HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
882 !node->next) {
883 req->msg[2] = i;
884
885 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
886 if (status) {
887 dev_err(&hdev->pdev->dev,
888 "Map TQP fail, status is %d.\n",
889 status);
890 return status;
891 }
892 i = 0;
893 hclgevf_cmd_setup_basic_desc(&desc,
894 HCLGEVF_OPC_MBX_VF_TO_PF,
895 false);
896 req->msg[0] = type;
897 req->msg[1] = vector_id;
898 }
899 }
900
901 return 0;
902 }
903
904 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
905 struct hnae3_ring_chain_node *ring_chain)
906 {
907 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
908 int vector_id;
909
910 vector_id = hclgevf_get_vector_index(hdev, vector);
911 if (vector_id < 0) {
912 dev_err(&handle->pdev->dev,
913 "Get vector index fail. ret =%d\n", vector_id);
914 return vector_id;
915 }
916
917 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
918 }
919
920 static int hclgevf_unmap_ring_from_vector(
921 struct hnae3_handle *handle,
922 int vector,
923 struct hnae3_ring_chain_node *ring_chain)
924 {
925 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
926 int ret, vector_id;
927
928 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
929 return 0;
930
931 vector_id = hclgevf_get_vector_index(hdev, vector);
932 if (vector_id < 0) {
933 dev_err(&handle->pdev->dev,
934 "Get vector index fail. ret =%d\n", vector_id);
935 return vector_id;
936 }
937
938 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
939 if (ret)
940 dev_err(&handle->pdev->dev,
941 "Unmap ring from vector fail. vector=%d, ret =%d\n",
942 vector_id,
943 ret);
944
945 return ret;
946 }
947
948 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
949 {
950 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
951 int vector_id;
952
953 vector_id = hclgevf_get_vector_index(hdev, vector);
954 if (vector_id < 0) {
955 dev_err(&handle->pdev->dev,
956 "hclgevf_put_vector get vector index fail. ret =%d\n",
957 vector_id);
958 return vector_id;
959 }
960
961 hclgevf_free_vector(hdev, vector_id);
962
963 return 0;
964 }
965
966 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
967 bool en_uc_pmc, bool en_mc_pmc)
968 {
969 struct hclge_mbx_vf_to_pf_cmd *req;
970 struct hclgevf_desc desc;
971 int status;
972
973 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
974
975 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
976 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
977 req->msg[1] = en_uc_pmc ? 1 : 0;
978 req->msg[2] = en_mc_pmc ? 1 : 0;
979
980 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
981 if (status)
982 dev_err(&hdev->pdev->dev,
983 "Set promisc mode fail, status is %d.\n", status);
984
985 return status;
986 }
987
988 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
989 bool en_uc_pmc, bool en_mc_pmc)
990 {
991 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
992
993 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
994 }
995
996 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
997 int stream_id, bool enable)
998 {
999 struct hclgevf_cfg_com_tqp_queue_cmd *req;
1000 struct hclgevf_desc desc;
1001 int status;
1002
1003 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
1004
1005 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
1006 false);
1007 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
1008 req->stream_id = cpu_to_le16(stream_id);
1009 req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
1010
1011 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1012 if (status)
1013 dev_err(&hdev->pdev->dev,
1014 "TQP enable fail, status =%d.\n", status);
1015
1016 return status;
1017 }
1018
1019 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
1020 {
1021 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
1022 struct hclgevf_tqp *tqp;
1023 int i;
1024
1025 for (i = 0; i < kinfo->num_tqps; i++) {
1026 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
1027 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
1028 }
1029 }
1030
1031 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
1032 {
1033 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1034
1035 ether_addr_copy(p, hdev->hw.mac.mac_addr);
1036 }
1037
1038 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
1039 bool is_first)
1040 {
1041 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1042 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
1043 u8 *new_mac_addr = (u8 *)p;
1044 u8 msg_data[ETH_ALEN * 2];
1045 u16 subcode;
1046 int status;
1047
1048 ether_addr_copy(msg_data, new_mac_addr);
1049 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
1050
1051 subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
1052 HCLGE_MBX_MAC_VLAN_UC_MODIFY;
1053
1054 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
1055 subcode, msg_data, ETH_ALEN * 2,
1056 true, NULL, 0);
1057 if (!status)
1058 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
1059
1060 return status;
1061 }
1062
1063 static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
1064 const unsigned char *addr)
1065 {
1066 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1067
1068 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
1069 HCLGE_MBX_MAC_VLAN_UC_ADD,
1070 addr, ETH_ALEN, false, NULL, 0);
1071 }
1072
1073 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
1074 const unsigned char *addr)
1075 {
1076 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1077
1078 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
1079 HCLGE_MBX_MAC_VLAN_UC_REMOVE,
1080 addr, ETH_ALEN, false, NULL, 0);
1081 }
1082
1083 static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
1084 const unsigned char *addr)
1085 {
1086 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1087
1088 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
1089 HCLGE_MBX_MAC_VLAN_MC_ADD,
1090 addr, ETH_ALEN, false, NULL, 0);
1091 }
1092
1093 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
1094 const unsigned char *addr)
1095 {
1096 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1097
1098 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
1099 HCLGE_MBX_MAC_VLAN_MC_REMOVE,
1100 addr, ETH_ALEN, false, NULL, 0);
1101 }
1102
1103 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
1104 __be16 proto, u16 vlan_id,
1105 bool is_kill)
1106 {
1107 #define HCLGEVF_VLAN_MBX_MSG_LEN 5
1108 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1109 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
1110
1111 if (vlan_id > 4095)
1112 return -EINVAL;
1113
1114 if (proto != htons(ETH_P_8021Q))
1115 return -EPROTONOSUPPORT;
1116
1117 msg_data[0] = is_kill;
1118 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
1119 memcpy(&msg_data[3], &proto, sizeof(proto));
1120 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
1121 HCLGE_MBX_VLAN_FILTER, msg_data,
1122 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
1123 }
1124
1125 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
1126 {
1127 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1128 u8 msg_data;
1129
1130 msg_data = enable ? 1 : 0;
1131 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
1132 HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
1133 1, false, NULL, 0);
1134 }
1135
1136 static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
1137 {
1138 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1139 u8 msg_data[2];
1140 int ret;
1141
1142 memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
1143
1144 /* disable vf queue before sending queue reset msg to PF */
1145 ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
1146 if (ret)
1147 return ret;
1148
1149 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
1150 2, true, NULL, 0);
1151 }
1152
1153 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
1154 {
1155 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1156
1157 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
1158 sizeof(new_mtu), true, NULL, 0);
1159 }
1160
1161 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
1162 enum hnae3_reset_notify_type type)
1163 {
1164 struct hnae3_client *client = hdev->nic_client;
1165 struct hnae3_handle *handle = &hdev->nic;
1166 int ret;
1167
1168 if (!client->ops->reset_notify)
1169 return -EOPNOTSUPP;
1170
1171 ret = client->ops->reset_notify(handle, type);
1172 if (ret)
1173 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
1174 type, ret);
1175
1176 return ret;
1177 }
1178
1179 static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
1180 {
1181 struct hclgevf_dev *hdev = ae_dev->priv;
1182
1183 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
1184 }
1185
1186 static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
1187 unsigned long delay_us,
1188 unsigned long wait_cnt)
1189 {
1190 unsigned long cnt = 0;
1191
1192 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
1193 cnt++ < wait_cnt)
1194 usleep_range(delay_us, delay_us * 2);
1195
1196 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
1197 dev_err(&hdev->pdev->dev,
1198 "flr wait timeout\n");
1199 return -ETIMEDOUT;
1200 }
1201
1202 return 0;
1203 }
1204
1205 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
1206 {
1207 #define HCLGEVF_RESET_WAIT_US 20000
1208 #define HCLGEVF_RESET_WAIT_CNT 2000
1209 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \
1210 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
1211
1212 u32 val;
1213 int ret;
1214
1215 /* wait to check the hardware reset completion status */
1216 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1217 dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
1218
1219 if (hdev->reset_type == HNAE3_FLR_RESET)
1220 return hclgevf_flr_poll_timeout(hdev,
1221 HCLGEVF_RESET_WAIT_US,
1222 HCLGEVF_RESET_WAIT_CNT);
1223
1224 ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
1225 !(val & HCLGEVF_RST_ING_BITS),
1226 HCLGEVF_RESET_WAIT_US,
1227 HCLGEVF_RESET_WAIT_TIMEOUT_US);
1228
1229 /* hardware completion status should be available by this time */
1230 if (ret) {
1231 dev_err(&hdev->pdev->dev,
1232 "could'nt get reset done status from h/w, timeout!\n");
1233 return ret;
1234 }
1235
1236 /* we will wait a bit more to let the reset of the stack complete. This
1237 * might happen in case reset assertion was made by PF. Yes, this also
1238 * means we might end up waiting a bit more even for VF reset.
1239 */
1240 msleep(5000);
1241
1242 return 0;
1243 }
1244
1245 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1246 {
1247 int ret;
1248
1249 /* uninitialize the nic client */
1250 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1251 if (ret)
1252 return ret;
1253
1254 /* re-initialize the hclge device */
1255 ret = hclgevf_reset_hdev(hdev);
1256 if (ret) {
1257 dev_err(&hdev->pdev->dev,
1258 "hclge device re-init failed, VF is disabled!\n");
1259 return ret;
1260 }
1261
1262 /* bring up the nic client again */
1263 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1264 if (ret)
1265 return ret;
1266
1267 return 0;
1268 }
1269
1270 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1271 {
1272 int ret = 0;
1273
1274 switch (hdev->reset_type) {
1275 case HNAE3_VF_FUNC_RESET:
1276 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
1277 0, true, NULL, sizeof(u8));
1278 break;
1279 case HNAE3_FLR_RESET:
1280 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
1281 break;
1282 default:
1283 break;
1284 }
1285
1286 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
1287
1288 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
1289 hdev->reset_type, ret);
1290
1291 return ret;
1292 }
1293
1294 static int hclgevf_reset(struct hclgevf_dev *hdev)
1295 {
1296 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1297 int ret;
1298
1299 /* Initialize ae_dev reset status as well, in case the enet layer wants
1300 * to know if the device is undergoing reset
1301 */
1302 ae_dev->reset_type = hdev->reset_type;
1303 hdev->reset_count++;
1304 rtnl_lock();
1305
1306 /* bring down the nic to stop any ongoing TX/RX */
1307 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1308 if (ret)
1309 goto err_reset_lock;
1310
1311 rtnl_unlock();
1312
1313 ret = hclgevf_reset_prepare_wait(hdev);
1314 if (ret)
1315 goto err_reset;
1316
1317 /* check if VF could successfully fetch the hardware reset completion
1318 * status from the hardware
1319 */
1320 ret = hclgevf_reset_wait(hdev);
1321 if (ret) {
1322 /* can't do much in this situation, will disable VF */
1323 dev_err(&hdev->pdev->dev,
1324 "VF failed(=%d) to fetch H/W reset completion status\n",
1325 ret);
1326 goto err_reset;
1327 }
1328
1329 rtnl_lock();
1330
1331 /* now, re-initialize the nic client and ae device */
1332 ret = hclgevf_reset_stack(hdev);
1333 if (ret) {
1334 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1335 goto err_reset_lock;
1336 }
1337
1338 /* bring up the nic to enable TX/RX again */
1339 ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1340 if (ret)
1341 goto err_reset_lock;
1342
1343 rtnl_unlock();
1344
1345 hdev->last_reset_time = jiffies;
1346 ae_dev->reset_type = HNAE3_NONE_RESET;
1347
1348 return ret;
1349 err_reset_lock:
1350 rtnl_unlock();
1351 err_reset:
1352 /* When VF reset failed, only the higher level reset asserted by PF
1353 * can restore it, so re-initialize the command queue to receive
1354 * this higher reset event.
1355 */
1356 hclgevf_cmd_init(hdev);
1357 dev_err(&hdev->pdev->dev, "failed to reset VF\n");
1358
1359 return ret;
1360 }
1361
1362 static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
1363 unsigned long *addr)
1364 {
1365 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
1366
1367 /* return the highest priority reset level amongst all */
1368 if (test_bit(HNAE3_VF_RESET, addr)) {
1369 rst_level = HNAE3_VF_RESET;
1370 clear_bit(HNAE3_VF_RESET, addr);
1371 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1372 clear_bit(HNAE3_VF_FUNC_RESET, addr);
1373 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1374 rst_level = HNAE3_VF_FULL_RESET;
1375 clear_bit(HNAE3_VF_FULL_RESET, addr);
1376 clear_bit(HNAE3_VF_FUNC_RESET, addr);
1377 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
1378 rst_level = HNAE3_VF_PF_FUNC_RESET;
1379 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1380 clear_bit(HNAE3_VF_FUNC_RESET, addr);
1381 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
1382 rst_level = HNAE3_VF_FUNC_RESET;
1383 clear_bit(HNAE3_VF_FUNC_RESET, addr);
1384 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
1385 rst_level = HNAE3_FLR_RESET;
1386 clear_bit(HNAE3_FLR_RESET, addr);
1387 }
1388
1389 return rst_level;
1390 }
1391
1392 static void hclgevf_reset_event(struct pci_dev *pdev,
1393 struct hnae3_handle *handle)
1394 {
1395 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1396 struct hclgevf_dev *hdev = ae_dev->priv;
1397
1398 dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
1399
1400 if (hdev->default_reset_request)
1401 hdev->reset_level =
1402 hclgevf_get_reset_level(hdev,
1403 &hdev->default_reset_request);
1404 else
1405 hdev->reset_level = HNAE3_VF_FUNC_RESET;
1406
1407 /* reset of this VF requested */
1408 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1409 hclgevf_reset_task_schedule(hdev);
1410
1411 hdev->last_reset_time = jiffies;
1412 }
1413
1414 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
1415 enum hnae3_reset_type rst_type)
1416 {
1417 struct hclgevf_dev *hdev = ae_dev->priv;
1418
1419 set_bit(rst_type, &hdev->default_reset_request);
1420 }
1421
1422 static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
1423 {
1424 #define HCLGEVF_FLR_WAIT_MS 100
1425 #define HCLGEVF_FLR_WAIT_CNT 50
1426 struct hclgevf_dev *hdev = ae_dev->priv;
1427 int cnt = 0;
1428
1429 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
1430 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
1431 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
1432 hclgevf_reset_event(hdev->pdev, NULL);
1433
1434 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
1435 cnt++ < HCLGEVF_FLR_WAIT_CNT)
1436 msleep(HCLGEVF_FLR_WAIT_MS);
1437
1438 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
1439 dev_err(&hdev->pdev->dev,
1440 "flr wait down timeout: %d\n", cnt);
1441 }
1442
1443 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1444 {
1445 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1446
1447 return hdev->fw_version;
1448 }
1449
1450 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1451 {
1452 struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1453
1454 vector->vector_irq = pci_irq_vector(hdev->pdev,
1455 HCLGEVF_MISC_VECTOR_NUM);
1456 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1457 /* vector status always valid for Vector 0 */
1458 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1459 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1460
1461 hdev->num_msi_left -= 1;
1462 hdev->num_msi_used += 1;
1463 }
1464
1465 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1466 {
1467 if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
1468 !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
1469 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1470 schedule_work(&hdev->rst_service_task);
1471 }
1472 }
1473
1474 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1475 {
1476 if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
1477 !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
1478 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1479 schedule_work(&hdev->mbx_service_task);
1480 }
1481 }
1482
1483 static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
1484 {
1485 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
1486 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
1487 schedule_work(&hdev->service_task);
1488 }
1489
1490 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
1491 {
1492 /* if we have any pending mailbox event then schedule the mbx task */
1493 if (hdev->mbx_event_pending)
1494 hclgevf_mbx_task_schedule(hdev);
1495
1496 if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
1497 hclgevf_reset_task_schedule(hdev);
1498 }
1499
1500 static void hclgevf_service_timer(struct timer_list *t)
1501 {
1502 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
1503
1504 mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
1505
1506 hclgevf_task_schedule(hdev);
1507 }
1508
1509 static void hclgevf_reset_service_task(struct work_struct *work)
1510 {
1511 struct hclgevf_dev *hdev =
1512 container_of(work, struct hclgevf_dev, rst_service_task);
1513 int ret;
1514
1515 if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1516 return;
1517
1518 clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1519
1520 if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1521 &hdev->reset_state)) {
1522 /* PF has intimated that it is about to reset the hardware.
1523 * We now have to poll & check if hardware has actually completed
1524 * the reset sequence. On hardware reset completion, VF needs to
1525 * reset the client and ae device.
1526 */
1527 hdev->reset_attempts = 0;
1528
1529 hdev->last_reset_time = jiffies;
1530 while ((hdev->reset_type =
1531 hclgevf_get_reset_level(hdev, &hdev->reset_pending))
1532 != HNAE3_NONE_RESET) {
1533 ret = hclgevf_reset(hdev);
1534 if (ret)
1535 dev_err(&hdev->pdev->dev,
1536 "VF stack reset failed %d.\n", ret);
1537 }
1538 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1539 &hdev->reset_state)) {
1540 /* we could be here when either of the below happens:
1541 * 1. reset was initiated due to watchdog timeout caused by
1542 * a. IMP was earlier reset and our TX got choked down,
1543 * which resulted in the watchdog reacting and inducing VF
1544 * reset. This also means our cmdq would be unreliable.
1545 * b. problem in TX due to some other lower layer (for example,
1546 * the link layer not functioning properly etc.)
1547 * 2. VF reset might have been initiated due to some config
1548 * change.
1549 *
1550 * NOTE: There is no clear way to detect the above cases other than
1551 * to react to the response of PF for this reset request. PF will
1552 * ack the 1b and 2 cases but we will not get any intimation about
1553 * 1a from PF as cmdq would be in an unreliable state i.e. mailbox
1554 * communication between PF and VF would be broken.
1555 */
1556
1557 /* if we are never getting into the pending state it means either:
1558 * 1. PF is not receiving our request which could be due to IMP
1559 * reset
1560 * 2. PF is screwed
1561 * We cannot do much for 2. but to check first we can try resetting
1562 * our PCIe + stack and see if it alleviates the problem.
1563 */
1564 if (hdev->reset_attempts > 3) {
1565 /* prepare for full reset of stack + pcie interface */
1566 set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
1567
1568 /* "defer" schedule the reset task again */
1569 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1570 } else {
1571 hdev->reset_attempts++;
1572
1573 set_bit(hdev->reset_level, &hdev->reset_pending);
1574 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1575 }
1576 hclgevf_reset_task_schedule(hdev);
1577 }
1578
1579 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1580 }
1581
1582 static void hclgevf_mailbox_service_task(struct work_struct *work)
1583 {
1584 struct hclgevf_dev *hdev;
1585
1586 hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
1587
1588 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1589 return;
1590
1591 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1592
1593 hclgevf_mbx_async_handler(hdev);
1594
1595 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1596 }
1597
1598 static void hclgevf_keep_alive_timer(struct timer_list *t)
1599 {
1600 struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
1601
1602 schedule_work(&hdev->keep_alive_task);
1603 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
1604 }
1605
1606 static void hclgevf_keep_alive_task(struct work_struct *work)
1607 {
1608 struct hclgevf_dev *hdev;
1609 u8 respmsg;
1610 int ret;
1611
1612 hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
1613 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
1614 0, false, &respmsg, sizeof(u8));
1615 if (ret)
1616 dev_err(&hdev->pdev->dev,
1617 "VF sends keep alive cmd failed(=%d)\n", ret);
1618 }
1619
1620 static void hclgevf_service_task(struct work_struct *work)
1621 {
1622 struct hclgevf_dev *hdev;
1623
1624 hdev = container_of(work, struct hclgevf_dev, service_task);
1625
1626 /* request the link status from the PF. PF would be able to tell VF
1627 * about such updates in the future so we might remove this later
1628 */
1629 hclgevf_request_link_info(hdev);
1630
1631 hclgevf_deferred_task_schedule(hdev);
1632
1633 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1634 }
1635
1636 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1637 {
1638 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
1639 }
1640
1641 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
1642 u32 *clearval)
1643 {
1644 u32 cmdq_src_reg, rst_ing_reg;
1645
1646 /* fetch the events from their corresponding regs */
1647 cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
1648 HCLGEVF_VECTOR0_CMDQ_SRC_REG);
1649
1650 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
1651 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1652 dev_info(&hdev->pdev->dev,
1653 "receive reset interrupt 0x%x!\n", rst_ing_reg);
1654 set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
1655 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1656 set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
1657 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
1658 *clearval = cmdq_src_reg;
1659 return HCLGEVF_VECTOR0_EVENT_RST;
1660 }
1661
1662 /* check for vector0 mailbox(=CMDQ RX) event source */
1663 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
1664 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1665 *clearval = cmdq_src_reg;
1666 return HCLGEVF_VECTOR0_EVENT_MBX;
1667 }
1668
1669 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
1670
1671 return HCLGEVF_VECTOR0_EVENT_OTHER;
1672 }
1673
1674 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1675 {
1676 writel(en ? 1 : 0, vector->addr);
1677 }
1678
1679 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
1680 {
1681 enum hclgevf_evt_cause event_cause;
1682 struct hclgevf_dev *hdev = data;
1683 u32 clearval;
1684
1685 hclgevf_enable_vector(&hdev->misc_vector, false);
1686 event_cause = hclgevf_check_evt_cause(hdev, &clearval);
1687
1688 switch (event_cause) {
1689 case HCLGEVF_VECTOR0_EVENT_RST:
1690 hclgevf_reset_task_schedule(hdev);
1691 break;
1692 case HCLGEVF_VECTOR0_EVENT_MBX:
1693 hclgevf_mbx_handler(hdev);
1694 break;
1695 default:
1696 break;
1697 }
1698
1699 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
1700 hclgevf_clear_event_cause(hdev, clearval);
1701 hclgevf_enable_vector(&hdev->misc_vector, true);
1702 }
1703
1704 return IRQ_HANDLED;
1705 }
1706
1707 static int hclgevf_configure(struct hclgevf_dev *hdev)
1708 {
1709 int ret;
1710
1711 hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;
1712
1713 /* get queue configuration from PF */
1714 ret = hclgevf_get_queue_info(hdev);
1715 if (ret)
1716 return ret;
1717 /* get tc configuration from PF */
1718 return hclgevf_get_tc_info(hdev);
1719 }
1720
1721 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
1722 {
1723 struct pci_dev *pdev = ae_dev->pdev;
1724 struct hclgevf_dev *hdev = ae_dev->priv;
1725
1726 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1727 if (!hdev)
1728 return -ENOMEM;
1729
1730 hdev->pdev = pdev;
1731 hdev->ae_dev = ae_dev;
1732 ae_dev->priv = hdev;
1733
1734 return 0;
1735 }
1736
1737 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
1738 {
1739 struct hnae3_handle *roce = &hdev->roce;
1740 struct hnae3_handle *nic = &hdev->nic;
1741
1742 roce->rinfo.num_vectors = hdev->num_roce_msix;
1743
1744 if (hdev->num_msi_left < roce->rinfo.num_vectors ||
1745 hdev->num_msi_left == 0)
1746 return -EINVAL;
1747
1748 roce->rinfo.base_vector = hdev->roce_base_vector;
1749
1750 roce->rinfo.netdev = nic->kinfo.netdev;
1751 roce->rinfo.roce_io_base = hdev->hw.io_base;
1752
1753 roce->pdev = nic->pdev;
1754 roce->ae_algo = nic->ae_algo;
1755 roce->numa_node_mask = nic->numa_node_mask;
1756
1757 return 0;
1758 }
1759
1760 static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
1761 {
1762 struct hclgevf_cfg_gro_status_cmd *req;
1763 struct hclgevf_desc desc;
1764 int ret;
1765
1766 if (!hnae3_dev_gro_supported(hdev))
1767 return 0;
1768
1769 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
1770 false);
1771 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
1772
1773 req->gro_en = cpu_to_le16(en ? 1 : 0);
1774
1775 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1776 if (ret)
1777 dev_err(&hdev->pdev->dev,
1778 "VF GRO hardware config cmd failed, ret = %d.\n", ret);
1779
1780 return ret;
1781 }
1782
1783 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
1784 {
1785 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1786 int i, ret;
1787
1788 rss_cfg->rss_size = hdev->rss_size_max;
1789
1790 if (hdev->pdev->revision >= 0x21) {
1791 rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
1792 netdev_rss_key_fill(rss_cfg->rss_hash_key,
1793 HCLGEVF_RSS_KEY_SIZE);
1794
1795 ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
1796 rss_cfg->rss_hash_key);
1797 if (ret)
1798 return ret;
1799
1800 rss_cfg->rss_tuple_sets.ipv4_tcp_en =
1801 HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1802 rss_cfg->rss_tuple_sets.ipv4_udp_en =
1803 HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1804 rss_cfg->rss_tuple_sets.ipv4_sctp_en =
1805 HCLGEVF_RSS_INPUT_TUPLE_SCTP;
1806 rss_cfg->rss_tuple_sets.ipv4_fragment_en =
1807 HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1808 rss_cfg->rss_tuple_sets.ipv6_tcp_en =
1809 HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1810 rss_cfg->rss_tuple_sets.ipv6_udp_en =
1811 HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1812 rss_cfg->rss_tuple_sets.ipv6_sctp_en =
1813 HCLGEVF_RSS_INPUT_TUPLE_SCTP;
1814 rss_cfg->rss_tuple_sets.ipv6_fragment_en =
1815 HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1816
1817 ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
1818 if (ret)
1819 return ret;
1820
1821 }
1822
1823 /* Initialize RSS indirect table for each vport */
1824 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
1825 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
1826
1827 ret = hclgevf_set_rss_indir_table(hdev);
1828 if (ret)
1829 return ret;
1830
1831 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
1832 }
1833
1834 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
1835 {
1836 /* other vlan config (like VLAN TX/RX offload) would also be added
1837 * here later
1838 */
1839 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
1840 false);
1841 }
1842
1843 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
1844 {
1845 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1846
1847 if (enable) {
1848 mod_timer(&hdev->service_timer, jiffies + HZ);
1849 } else {
1850 del_timer_sync(&hdev->service_timer);
1851 cancel_work_sync(&hdev->service_task);
1852 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1853 }
1854 }
1855
1856 static int hclgevf_ae_start(struct hnae3_handle *handle)
1857 {
1858 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1859
1860 /* reset tqp stats */
1861 hclgevf_reset_tqp_stats(handle);
1862
1863 hclgevf_request_link_info(hdev);
1864
1865 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1866
1867 return 0;
1868 }
1869
1870 static void hclgevf_ae_stop(struct hnae3_handle *handle)
1871 {
1872 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1873
1874 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1875
1876 /* reset tqp stats */
1877 hclgevf_reset_tqp_stats(handle);
1878 hclgevf_update_link_status(hdev, 0);
1879 }
1880
1881 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
1882 {
1883 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1884 u8 msg_data;
1885
1886 msg_data = alive ? 1 : 0;
1887 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
1888 0, &msg_data, 1, false, NULL, 0);
1889 }
1890
1891 static int hclgevf_client_start(struct hnae3_handle *handle)
1892 {
1893 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1894
1895 mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
1896 return hclgevf_set_alive(handle, true);
1897 }
1898
1899 static void hclgevf_client_stop(struct hnae3_handle *handle)
1900 {
1901 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1902 int ret;
1903
1904 ret = hclgevf_set_alive(handle, false);
1905 if (ret)
1906 dev_warn(&hdev->pdev->dev,
1907 "%s failed %d\n", __func__, ret);
1908
1909 del_timer_sync(&hdev->keep_alive_timer);
1910 cancel_work_sync(&hdev->keep_alive_task);
1911 }
1912
1913 static void hclgevf_state_init(struct hclgevf_dev *hdev)
1914 {
1915 /* setup tasks for the MBX */
1916 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
1917 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1918 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1919
1920 /* setup tasks for service timer */
1921 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
1922
1923 INIT_WORK(&hdev->service_task, hclgevf_service_task);
1924 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1925
1926 INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
1927
1928 mutex_init(&hdev->mbx_resp.mbx_mutex);
1929
1930 /* bring the device down */
1931 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1932 }
1933
1934 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
1935 {
1936 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1937
1938 if (hdev->service_timer.function)
1939 del_timer_sync(&hdev->service_timer);
1940 if (hdev->service_task.func)
1941 cancel_work_sync(&hdev->service_task);
1942 if (hdev->mbx_service_task.func)
1943 cancel_work_sync(&hdev->mbx_service_task);
1944 if (hdev->rst_service_task.func)
1945 cancel_work_sync(&hdev->rst_service_task);
1946
1947 mutex_destroy(&hdev->mbx_resp.mbx_mutex);
1948 }
1949
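/* Allocate MSI/MSI-X vectors for this VF. When RoCE is supported the
 * request is MSI-X only and must at least cover the RoCE vector offset;
 * the allocated vectors are tracked in vector_status and vector_irq.
 */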
1950 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
1951 {
1952 struct pci_dev *pdev = hdev->pdev;
1953 int vectors;
1954 int i;
1955
1956 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
1957 vectors = pci_alloc_irq_vectors(pdev,
1958 hdev->roce_base_msix_offset + 1,
1959 hdev->num_msi,
1960 PCI_IRQ_MSIX);
1961 else
1962 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1963 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1964
1965 if (vectors < 0) {
1966 dev_err(&pdev->dev,
1967 "failed(%d) to allocate MSI/MSI-X vectors\n",
1968 vectors);
1969 return vectors;
1970 }
1971 if (vectors < hdev->num_msi)
1972 dev_warn(&hdev->pdev->dev,
1973 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1974 hdev->num_msi, vectors);
1975
1976 hdev->num_msi = vectors;
1977 hdev->num_msi_left = vectors;
1978 hdev->base_msi_vector = pdev->irq;
1979 hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
1980
1981 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1982 sizeof(u16), GFP_KERNEL);
1983 if (!hdev->vector_status) {
1984 pci_free_irq_vectors(pdev);
1985 return -ENOMEM;
1986 }
1987
1988 for (i = 0; i < hdev->num_msi; i++)
1989 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
1990
1991 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1992 sizeof(int), GFP_KERNEL);
1993 if (!hdev->vector_irq) {
1994 devm_kfree(&pdev->dev, hdev->vector_status);
1995 pci_free_irq_vectors(pdev);
1996 return -ENOMEM;
1997 }
1998
1999 return 0;
2000 }
2001
2002 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2003 {
2004 struct pci_dev *pdev = hdev->pdev;
2005
2006 devm_kfree(&pdev->dev, hdev->vector_status);
2007 devm_kfree(&pdev->dev, hdev->vector_irq);
2008 pci_free_irq_vectors(pdev);
2009 }
2010
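/* Request the misc interrupt (vector 0), clear any stale event cause
 * and enable the vector.
 */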
2011 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2012 {
2013 int ret = 0;
2014
2015 hclgevf_get_misc_vector(hdev);
2016
2017 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2018 0, "hclgevf_cmd", hdev);
2019 if (ret) {
2020 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2021 hdev->misc_vector.vector_irq);
2022 return ret;
2023 }
2024
2025 hclgevf_clear_event_cause(hdev, 0);
2026
2027 /* enable the misc vector (vector 0) */
2028 hclgevf_enable_vector(&hdev->misc_vector, true);
2029
2030 return ret;
2031 }
2032
2033 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2034 {
2035 /* disable the misc vector (vector 0) */
2036 hclgevf_enable_vector(&hdev->misc_vector, false);
2037 synchronize_irq(hdev->misc_vector.vector_irq);
2038 free_irq(hdev->misc_vector.vector_irq, hdev);
2039 hclgevf_free_vector(hdev, 0);
2040 }
2041
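/* Register a KNIC/UNIC/RoCE client instance with this VF. The RoCE
 * client is only initialized once both the NIC client and RoCE support
 * are present; on failure the client pointers are cleared again.
 */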
2042 static int hclgevf_init_client_instance(struct hnae3_client *client,
2043 struct hnae3_ae_dev *ae_dev)
2044 {
2045 struct hclgevf_dev *hdev = ae_dev->priv;
2046 int ret;
2047
2048 switch (client->type) {
2049 case HNAE3_CLIENT_KNIC:
2050 hdev->nic_client = client;
2051 hdev->nic.client = client;
2052
2053 ret = client->ops->init_instance(&hdev->nic);
2054 if (ret)
2055 goto clear_nic;
2056
2057 hnae3_set_client_init_flag(client, ae_dev, 1);
2058
2059 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
2060 struct hnae3_client *rc = hdev->roce_client;
2061
2062 ret = hclgevf_init_roce_base_info(hdev);
2063 if (ret)
2064 goto clear_roce;
2065 ret = rc->ops->init_instance(&hdev->roce);
2066 if (ret)
2067 goto clear_roce;
2068
2069 hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
2070 1);
2071 }
2072 break;
2073 case HNAE3_CLIENT_UNIC:
2074 hdev->nic_client = client;
2075 hdev->nic.client = client;
2076
2077 ret = client->ops->init_instance(&hdev->nic);
2078 if (ret)
2079 goto clear_nic;
2080
2081 hnae3_set_client_init_flag(client, ae_dev, 1);
2082 break;
2083 case HNAE3_CLIENT_ROCE:
2084 if (hnae3_dev_roce_supported(hdev)) {
2085 hdev->roce_client = client;
2086 hdev->roce.client = client;
2087 }
2088
2089 if (hdev->roce_client && hdev->nic_client) {
2090 ret = hclgevf_init_roce_base_info(hdev);
2091 if (ret)
2092 goto clear_roce;
2093
2094 ret = client->ops->init_instance(&hdev->roce);
2095 if (ret)
2096 goto clear_roce;
2097 }
2098
2099 hnae3_set_client_init_flag(client, ae_dev, 1);
2100 break;
2101 default:
2102 return -EINVAL;
2103 }
2104
2105 return 0;
2106
2107 clear_nic:
2108 hdev->nic_client = NULL;
2109 hdev->nic.client = NULL;
2110 return ret;
2111 clear_roce:
2112 hdev->roce_client = NULL;
2113 hdev->roce.client = NULL;
2114 return ret;
2115 }
2116
2117 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2118 struct hnae3_ae_dev *ae_dev)
2119 {
2120 struct hclgevf_dev *hdev = ae_dev->priv;
2121
2122 /* un-init the RoCE client, if it exists */
2123 if (hdev->roce_client) {
2124 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2125 hdev->roce_client = NULL;
2126 hdev->roce.client = NULL;
2127 }
2128
2129 /* un-init nic/unic, if this was not called by the RoCE client */
2130 if (client->ops->uninit_instance && hdev->nic_client &&
2131 client->type != HNAE3_CLIENT_ROCE) {
2132 client->ops->uninit_instance(&hdev->nic, 0);
2133 hdev->nic_client = NULL;
2134 hdev->nic.client = NULL;
2135 }
2136 }
2137
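/* Basic PCI bring-up: enable the device, set the DMA mask, request the
 * regions and map BAR 2, which holds the VF configuration registers.
 */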
2138 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2139 {
2140 struct pci_dev *pdev = hdev->pdev;
2141 struct hclgevf_hw *hw;
2142 int ret;
2143
2144 ret = pci_enable_device(pdev);
2145 if (ret) {
2146 dev_err(&pdev->dev, "failed to enable PCI device\n");
2147 return ret;
2148 }
2149
2150 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2151 if (ret) {
2152 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
2153 goto err_disable_device;
2154 }
2155
2156 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2157 if (ret) {
2158 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2159 goto err_disable_device;
2160 }
2161
2162 pci_set_master(pdev);
2163 hw = &hdev->hw;
2164 hw->hdev = hdev;
2165 hw->io_base = pci_iomap(pdev, 2, 0);
2166 if (!hw->io_base) {
2167 dev_err(&pdev->dev, "can't map configuration register space\n");
2168 ret = -ENOMEM;
2169 goto err_clr_master;
2170 }
2171
2172 return 0;
2173
2174 err_clr_master:
2175 pci_clear_master(pdev);
2176 pci_release_regions(pdev);
2177 err_disable_device:
2178 pci_disable_device(pdev);
2179
2180 return ret;
2181 }
2182
2183 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2184 {
2185 struct pci_dev *pdev = hdev->pdev;
2186
2187 pci_iounmap(pdev, hdev->hw.io_base);
2188 pci_clear_master(pdev);
2189 pci_release_regions(pdev);
2190 pci_disable_device(pdev);
2191 }
2192
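/* Query the firmware for the resources assigned to this VF, i.e. the
 * number of interrupt vectors and, when RoCE is supported, the RoCE
 * vector offset.
 */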
2193 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2194 {
2195 struct hclgevf_query_res_cmd *req;
2196 struct hclgevf_desc desc;
2197 int ret;
2198
2199 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
2200 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2201 if (ret) {
2202 dev_err(&hdev->pdev->dev,
2203 "query vf resource failed, ret = %d.\n", ret);
2204 return ret;
2205 }
2206
2207 req = (struct hclgevf_query_res_cmd *)desc.data;
2208
2209 if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
2210 hdev->roce_base_msix_offset =
2211 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
2212 HCLGEVF_MSIX_OFT_ROCEE_M,
2213 HCLGEVF_MSIX_OFT_ROCEE_S);
2214 hdev->num_roce_msix =
2215 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
2216 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2217
2218 /* A VF should have both NIC vectors and RoCE vectors; the NIC
2219 * vectors are queued before the RoCE vectors. The offset is fixed to 64.
2220 */
2221 hdev->num_msi = hdev->num_roce_msix +
2222 hdev->roce_base_msix_offset;
2223 } else {
2224 hdev->num_msi =
2225 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
2226 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2227 }
2228
2229 return 0;
2230 }
2231
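/* Re-initialize interrupt resources across a reset. For a full VF reset
 * the MSI/MSI-X vectors and the misc IRQ are torn down first and then
 * set up again from scratch.
 */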
2232 static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
2233 {
2234 struct pci_dev *pdev = hdev->pdev;
2235 int ret = 0;
2236
2237 if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
2238 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2239 hclgevf_misc_irq_uninit(hdev);
2240 hclgevf_uninit_msi(hdev);
2241 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2242 }
2243
2244 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2245 pci_set_master(pdev);
2246 ret = hclgevf_init_msi(hdev);
2247 if (ret) {
2248 dev_err(&pdev->dev,
2249 "failed(%d) to init MSI/MSI-X\n", ret);
2250 return ret;
2251 }
2252
2253 ret = hclgevf_misc_irq_init(hdev);
2254 if (ret) {
2255 hclgevf_uninit_msi(hdev);
2256 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2257 ret);
2258 return ret;
2259 }
2260
2261 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2262 }
2263
2264 return ret;
2265 }
2266
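/* Restore hardware state after a VF reset: interrupts, command queue,
 * RSS, GRO and VLAN filtering are re-initialized in turn.
 */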
2267 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
2268 {
2269 struct pci_dev *pdev = hdev->pdev;
2270 int ret;
2271
2272 ret = hclgevf_pci_reset(hdev);
2273 if (ret) {
2274 dev_err(&pdev->dev, "pci reset failed %d\n", ret);
2275 return ret;
2276 }
2277
2278 ret = hclgevf_cmd_init(hdev);
2279 if (ret) {
2280 dev_err(&pdev->dev, "cmd init failed %d\n", ret);
2281 return ret;
2282 }
2283
2284 ret = hclgevf_rss_init_hw(hdev);
2285 if (ret) {
2286 dev_err(&hdev->pdev->dev,
2287 "failed(%d) to initialize RSS\n", ret);
2288 return ret;
2289 }
2290
2291 ret = hclgevf_config_gro(hdev, true);
2292 if (ret)
2293 return ret;
2294
2295 ret = hclgevf_init_vlan_config(hdev);
2296 if (ret) {
2297 dev_err(&hdev->pdev->dev,
2298 "failed(%d) to initialize VLAN config\n", ret);
2299 return ret;
2300 }
2301
2302 dev_info(&hdev->pdev->dev, "Reset done\n");
2303
2304 return 0;
2305 }
2306
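/* First-time initialization of the VF device: PCI and command queue
 * setup, resource query, vector allocation, TQP/handle setup, RSS and
 * VLAN configuration. Errors unwind through the goto labels below.
 */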
2307 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
2308 {
2309 struct pci_dev *pdev = hdev->pdev;
2310 int ret;
2311
2312 ret = hclgevf_pci_init(hdev);
2313 if (ret) {
2314 dev_err(&pdev->dev, "PCI initialization failed\n");
2315 return ret;
2316 }
2317
2318 ret = hclgevf_cmd_queue_init(hdev);
2319 if (ret) {
2320 dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
2321 goto err_cmd_queue_init;
2322 }
2323
2324 ret = hclgevf_cmd_init(hdev);
2325 if (ret)
2326 goto err_cmd_init;
2327
2328 /* Get vf resource */
2329 ret = hclgevf_query_vf_resource(hdev);
2330 if (ret) {
2331 dev_err(&hdev->pdev->dev,
2332 "Query vf status error, ret = %d.\n", ret);
2333 goto err_cmd_init;
2334 }
2335
2336 ret = hclgevf_init_msi(hdev);
2337 if (ret) {
2338 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
2339 goto err_cmd_init;
2340 }
2341
2342 hclgevf_state_init(hdev);
2343 hdev->reset_level = HNAE3_VF_FUNC_RESET;
2344
2345 ret = hclgevf_misc_irq_init(hdev);
2346 if (ret) {
2347 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2348 ret);
2349 goto err_misc_irq_init;
2350 }
2351
2352 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2353
2354 ret = hclgevf_configure(hdev);
2355 if (ret) {
2356 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
2357 goto err_config;
2358 }
2359
2360 ret = hclgevf_alloc_tqps(hdev);
2361 if (ret) {
2362 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
2363 goto err_config;
2364 }
2365
2366 ret = hclgevf_set_handle_info(hdev);
2367 if (ret) {
2368 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
2369 goto err_config;
2370 }
2371
2372 ret = hclgevf_config_gro(hdev, true);
2373 if (ret)
2374 goto err_config;
2375
2376 /* Initialize RSS for this VF */
2377 ret = hclgevf_rss_init_hw(hdev);
2378 if (ret) {
2379 dev_err(&hdev->pdev->dev,
2380 "failed(%d) to initialize RSS\n", ret);
2381 goto err_config;
2382 }
2383
2384 ret = hclgevf_init_vlan_config(hdev);
2385 if (ret) {
2386 dev_err(&hdev->pdev->dev,
2387 "failed(%d) to initialize VLAN config\n", ret);
2388 goto err_config;
2389 }
2390
2391 hdev->last_reset_time = jiffies;
2392 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
2393
2394 return 0;
2395
2396 err_config:
2397 hclgevf_misc_irq_uninit(hdev);
2398 err_misc_irq_init:
2399 hclgevf_state_uninit(hdev);
2400 hclgevf_uninit_msi(hdev);
2401 err_cmd_init:
2402 hclgevf_cmd_uninit(hdev);
2403 err_cmd_queue_init:
2404 hclgevf_pci_uninit(hdev);
2405 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2406 return ret;
2407 }
2408
2409 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
2410 {
2411 hclgevf_state_uninit(hdev);
2412
2413 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2414 hclgevf_misc_irq_uninit(hdev);
2415 hclgevf_uninit_msi(hdev);
2416 }
2417
2418 hclgevf_pci_uninit(hdev);
2419 hclgevf_cmd_uninit(hdev);
2420 }
2421
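/* hnae3 framework entry points: allocate and initialize the VF device
 * when the AE device is probed and set up the keep-alive timer/work;
 * the uninit path below tears everything down again.
 */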
2422 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
2423 {
2424 struct pci_dev *pdev = ae_dev->pdev;
2425 struct hclgevf_dev *hdev;
2426 int ret;
2427
2428 ret = hclgevf_alloc_hdev(ae_dev);
2429 if (ret) {
2430 dev_err(&pdev->dev, "hclgevf device allocation failed\n");
2431 return ret;
2432 }
2433
2434 ret = hclgevf_init_hdev(ae_dev->priv);
2435 if (ret) {
2436 dev_err(&pdev->dev, "hclgevf device initialization failed\n");
2437 return ret;
2438 }
2439
2440 hdev = ae_dev->priv;
2441 timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
2442 INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
2443
2444 return 0;
2445 }
2446
2447 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
2448 {
2449 struct hclgevf_dev *hdev = ae_dev->priv;
2450
2451 hclgevf_uninit_hdev(hdev);
2452 ae_dev->priv = NULL;
2453 }
2454
2455 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
2456 {
2457 struct hnae3_handle *nic = &hdev->nic;
2458 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
2459
2460 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
2461 }
2462
2463 /**
2464 * hclgevf_get_channels - Get the current channels enabled and max supported.
2465 * @handle: hardware information for network interface
2466 * @ch: ethtool channels structure
2467 *
2468 * We don't support separate tx and rx queues as channels. The other count
2469 * represents how many queues are being used for control. max_combined counts
2470 * how many queue pairs we can support. They may not be mapped 1 to 1 with
2471 * q_vectors since we support a lot more queue pairs than q_vectors.
2472 **/
2473 static void hclgevf_get_channels(struct hnae3_handle *handle,
2474 struct ethtool_channels *ch)
2475 {
2476 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2477
2478 ch->max_combined = hclgevf_get_max_channels(hdev);
2479 ch->other_count = 0;
2480 ch->max_other = 0;
2481 ch->combined_count = hdev->num_tqps;
2482 }
2483
2484 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
2485 u16 *alloc_tqps, u16 *max_rss_size)
2486 {
2487 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2488
2489 *alloc_tqps = hdev->num_tqps;
2490 *max_rss_size = hdev->rss_size_max;
2491 }
2492
2493 static int hclgevf_get_status(struct hnae3_handle *handle)
2494 {
2495 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2496
2497 return hdev->hw.mac.link;
2498 }
2499
2500 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
2501 u8 *auto_neg, u32 *speed,
2502 u8 *duplex)
2503 {
2504 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2505
2506 if (speed)
2507 *speed = hdev->hw.mac.speed;
2508 if (duplex)
2509 *duplex = hdev->hw.mac.duplex;
2510 if (auto_neg)
2511 *auto_neg = AUTONEG_DISABLE;
2512 }
2513
2514 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
2515 u8 duplex)
2516 {
2517 hdev->hw.mac.speed = speed;
2518 hdev->hw.mac.duplex = duplex;
2519 }
2520
2521 static void hclgevf_get_media_type(struct hnae3_handle *handle,
2522 u8 *media_type)
2523 {
2524 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

2525 if (media_type)
2526 *media_type = hdev->hw.mac.media_type;
2527 }
2528
2529 static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
2530 {
2531 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2532
2533 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2534 }
2535
2536 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
2537 {
2538 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2539
2540 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2541 }
2542
2543 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
2544 {
2545 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2546
2547 return hdev->reset_count;
2548 }
2549
2550 #define MAX_SEPARATE_NUM 4
2551 #define SEPARATOR_VALUE 0xFFFFFFFF
2552 #define REG_NUM_PER_LINE 4
2553 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
2554
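/* Length of the register dump: each register group is rounded up to
 * whole lines of REG_NUM_PER_LINE words (the remainder is filled with
 * separator values), and the ring and TQP interrupt groups repeat per
 * TQP and per used vector respectively.
 */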
2555 static int hclgevf_get_regs_len(struct hnae3_handle *handle)
2556 {
2557 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
2558 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2559
2560 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
2561 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
2562 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
2563 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
2564
2565 return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
2566 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
2567 }
2568
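/* Fill the register dump: read the command queue, common, per-ring and
 * per-vector TQP interrupt registers, padding each group with
 * SEPARATOR_VALUE words.
 */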
2569 static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
2570 void *data)
2571 {
2572 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2573 int i, j, reg_um, separator_num;
2574 u32 *reg = data;
2575
2576 *version = hdev->fw_version;
2577
2578 /* fetch the per-VF register values from the VF PCIe register space */
2579 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
2580 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
2581 for (i = 0; i < reg_um; i++)
2582 *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
2583 for (i = 0; i < separator_num; i++)
2584 *reg++ = SEPARATOR_VALUE;
2585
2586 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
2587 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
2588 for (i = 0; i < reg_um; i++)
2589 *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
2590 for (i = 0; i < separator_num; i++)
2591 *reg++ = SEPARATOR_VALUE;
2592
2593 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
2594 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
2595 for (j = 0; j < hdev->num_tqps; j++) {
2596 for (i = 0; i < reg_um; i++)
2597 *reg++ = hclgevf_read_dev(&hdev->hw,
2598 ring_reg_addr_list[i] +
2599 0x200 * j);
2600 for (i = 0; i < separator_num; i++)
2601 *reg++ = SEPARATOR_VALUE;
2602 }
2603
2604 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
2605 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
2606 for (j = 0; j < hdev->num_msi_used - 1; j++) {
2607 for (i = 0; i < reg_um; i++)
2608 *reg++ = hclgevf_read_dev(&hdev->hw,
2609 tqp_intr_reg_addr_list[i] +
2610 4 * j);
2611 for (i = 0; i < separator_num; i++)
2612 *reg++ = SEPARATOR_VALUE;
2613 }
2614 }
2615
2616 static const struct hnae3_ae_ops hclgevf_ops = {
2617 .init_ae_dev = hclgevf_init_ae_dev,
2618 .uninit_ae_dev = hclgevf_uninit_ae_dev,
2619 .flr_prepare = hclgevf_flr_prepare,
2620 .flr_done = hclgevf_flr_done,
2621 .init_client_instance = hclgevf_init_client_instance,
2622 .uninit_client_instance = hclgevf_uninit_client_instance,
2623 .start = hclgevf_ae_start,
2624 .stop = hclgevf_ae_stop,
2625 .client_start = hclgevf_client_start,
2626 .client_stop = hclgevf_client_stop,
2627 .map_ring_to_vector = hclgevf_map_ring_to_vector,
2628 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
2629 .get_vector = hclgevf_get_vector,
2630 .put_vector = hclgevf_put_vector,
2631 .reset_queue = hclgevf_reset_tqp,
2632 .set_promisc_mode = hclgevf_set_promisc_mode,
2633 .get_mac_addr = hclgevf_get_mac_addr,
2634 .set_mac_addr = hclgevf_set_mac_addr,
2635 .add_uc_addr = hclgevf_add_uc_addr,
2636 .rm_uc_addr = hclgevf_rm_uc_addr,
2637 .add_mc_addr = hclgevf_add_mc_addr,
2638 .rm_mc_addr = hclgevf_rm_mc_addr,
2639 .get_stats = hclgevf_get_stats,
2640 .update_stats = hclgevf_update_stats,
2641 .get_strings = hclgevf_get_strings,
2642 .get_sset_count = hclgevf_get_sset_count,
2643 .get_rss_key_size = hclgevf_get_rss_key_size,
2644 .get_rss_indir_size = hclgevf_get_rss_indir_size,
2645 .get_rss = hclgevf_get_rss,
2646 .set_rss = hclgevf_set_rss,
2647 .get_rss_tuple = hclgevf_get_rss_tuple,
2648 .set_rss_tuple = hclgevf_set_rss_tuple,
2649 .get_tc_size = hclgevf_get_tc_size,
2650 .get_fw_version = hclgevf_get_fw_version,
2651 .set_vlan_filter = hclgevf_set_vlan_filter,
2652 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
2653 .reset_event = hclgevf_reset_event,
2654 .set_default_reset_request = hclgevf_set_def_reset_request,
2655 .get_channels = hclgevf_get_channels,
2656 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
2657 .get_regs_len = hclgevf_get_regs_len,
2658 .get_regs = hclgevf_get_regs,
2659 .get_status = hclgevf_get_status,
2660 .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
2661 .get_media_type = hclgevf_get_media_type,
2662 .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
2663 .ae_dev_resetting = hclgevf_ae_dev_resetting,
2664 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
2665 .set_mtu = hclgevf_set_mtu,
2666 .get_global_queue_id = hclgevf_get_qid_global,
2667 .set_timer_task = hclgevf_set_timer_task,
2668 };
2669
2670 static struct hnae3_ae_algo ae_algovf = {
2671 .ops = &hclgevf_ops,
2672 .pdev_id_table = ae_algovf_pci_tbl,
2673 };
2674
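/* Module entry points: register and unregister this VF algorithm with
 * the hnae3 framework.
 */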
2675 static int hclgevf_init(void)
2676 {
2677 pr_info("%s is initializing\n", HCLGEVF_NAME);
2678
2679 hnae3_register_ae_algo(&ae_algovf);
2680
2681 return 0;
2682 }
2683
2684 static void hclgevf_exit(void)
2685 {
2686 hnae3_unregister_ae_algo(&ae_algovf);
2687 }
2688 module_init(hclgevf_init);
2689 module_exit(hclgevf_exit);
2690
2691 MODULE_LICENSE("GPL");
2692 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2693 MODULE_DESCRIPTION("HCLGEVF Driver");
2694 MODULE_VERSION(HCLGEVF_MOD_VERSION);