net: hns3: remove TSO config command from VF driver
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/etherdevice.h>
5 #include "hclgevf_cmd.h"
6 #include "hclgevf_main.h"
7 #include "hclge_mbx.h"
8 #include "hnae3.h"
9
10 #define HCLGEVF_NAME "hclgevf"
11
12 static struct hnae3_ae_algo ae_algovf;
13
14 static const struct pci_device_id ae_algovf_pci_tbl[] = {
15 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
16 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
17 /* required last entry */
18 {0, }
19 };
20
21 static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
22 struct hnae3_handle *handle)
23 {
24 return container_of(handle, struct hclgevf_dev, nic);
25 }
26
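/* Query the hardware RX and TX packet counters for every TQP owned by this
 * VF (one firmware command per direction per queue) and accumulate them into
 * the per-queue software statistics.
 */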
27 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
28 {
29 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
30 struct hnae3_queue *queue;
31 struct hclgevf_desc desc;
32 struct hclgevf_tqp *tqp;
33 int status;
34 int i;
35
36 for (i = 0; i < hdev->num_tqps; i++) {
37 queue = handle->kinfo.tqp[i];
38 tqp = container_of(queue, struct hclgevf_tqp, q);
39 hclgevf_cmd_setup_basic_desc(&desc,
40 HCLGEVF_OPC_QUERY_RX_STATUS,
41 true);
42
43 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
44 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
45 if (status) {
46 dev_err(&hdev->pdev->dev,
47 "Query tqp stat fail, status = %d,queue = %d\n",
48 status, i);
49 return status;
50 }
51 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
52 le32_to_cpu(desc.data[1]);
53
54 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
55 true);
56
57 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
58 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
59 if (status) {
60 dev_err(&hdev->pdev->dev,
61 "Query tqp stat fail, status = %d,queue = %d\n",
62 status, i);
63 return status;
64 }
65 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
66 le32_to_cpu(desc.data[1]);
67 }
68
69 return 0;
70 }
71
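/* Copy the accumulated per-queue TX counters, followed by the RX counters,
 * into the caller supplied buffer (ethtool statistics layout) and return the
 * next free position in that buffer.
 */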
72 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
73 {
74 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
75 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
76 struct hclgevf_tqp *tqp;
77 u64 *buff = data;
78 int i;
79
80 for (i = 0; i < hdev->num_tqps; i++) {
81 tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
82 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
83 }
84 for (i = 0; i < kinfo->num_tqps; i++) {
85 tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
86 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
87 }
88
89 return buff;
90 }
91
92 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
93 {
94 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
95
96 return hdev->num_tqps * 2;
97 }
98
99 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
100 {
101 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
102 u8 *buff = data;
103 int i = 0;
104
105 for (i = 0; i < hdev->num_tqps; i++) {
106 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
107 struct hclgevf_tqp, q);
108 snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
109 tqp->index);
110 buff += ETH_GSTRING_LEN;
111 }
112
113 for (i = 0; i < hdev->num_tqps; i++) {
114 struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
115 struct hclgevf_tqp, q);
116 snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
117 tqp->index);
118 buff += ETH_GSTRING_LEN;
119 }
120
121 return buff;
122 }
123
124 static void hclgevf_update_stats(struct hnae3_handle *handle,
125 struct net_device_stats *net_stats)
126 {
127 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
128 int status;
129
130 status = hclgevf_tqps_update_stats(handle);
131 if (status)
132 dev_err(&hdev->pdev->dev,
133 "VF update of TQPS stats fail, status = %d.\n",
134 status);
135 }
136
137 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
138 {
139 if (strset == ETH_SS_TEST)
140 return -EOPNOTSUPP;
141 else if (strset == ETH_SS_STATS)
142 return hclgevf_tqps_get_sset_count(handle, strset);
143
144 return 0;
145 }
146
147 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
148 u8 *data)
149 {
150 	u8 *p = data;
151
152 if (strset == ETH_SS_STATS)
153 p = hclgevf_tqps_get_strings(handle, p);
154 }
155
156 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
157 {
158 hclgevf_tqps_get_stats(handle, data);
159 }
160
161 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
162 {
163 u8 resp_msg;
164 int status;
165
166 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
167 true, &resp_msg, sizeof(u8));
168 if (status) {
169 dev_err(&hdev->pdev->dev,
170 "VF request to get TC info from PF failed %d",
171 status);
172 return status;
173 }
174
175 hdev->hw_tc_map = resp_msg;
176
177 return 0;
178 }
179
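/* Ask the PF, over the VF-to-PF mailbox, for this VF's queue configuration.
 * The 8 byte response holds four consecutive u16 fields: number of TQPs,
 * maximum RSS size, descriptors per ring and RX buffer length.
 */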
180 static int hclge_get_queue_info(struct hclgevf_dev *hdev)
181 {
182 #define HCLGEVF_TQPS_RSS_INFO_LEN 8
183 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
184 int status;
185
186 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
187 true, resp_msg,
188 HCLGEVF_TQPS_RSS_INFO_LEN);
189 if (status) {
190 dev_err(&hdev->pdev->dev,
191 "VF request to get tqp info from PF failed %d",
192 status);
193 return status;
194 }
195
196 memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
197 memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
198 memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
199 memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
200
201 return 0;
202 }
203
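/* Allocate one hclgevf_tqp per hardware queue pair and fill in its static
 * fields: owning device, queue index, ring length, buffer size and the MMIO
 * base of the queue's register block.
 */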
204 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
205 {
206 struct hclgevf_tqp *tqp;
207 int i;
208
209 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
210 sizeof(struct hclgevf_tqp), GFP_KERNEL);
211 if (!hdev->htqp)
212 return -ENOMEM;
213
214 tqp = hdev->htqp;
215
216 for (i = 0; i < hdev->num_tqps; i++) {
217 tqp->dev = &hdev->pdev->dev;
218 tqp->index = i;
219
220 tqp->q.ae_algo = &ae_algovf;
221 tqp->q.buf_size = hdev->rx_buf_len;
222 tqp->q.desc_num = hdev->num_desc;
223 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
224 i * HCLGEVF_TQP_REG_SIZE;
225
226 tqp++;
227 }
228
229 return 0;
230 }
231
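/* Populate the KNIC private info of the nic handle: count the TCs enabled in
 * hw_tc_map, size the RSS group so that rss_size * num_tc never exceeds the
 * TQPs granted by the PF, and wire each hnae3_queue pointer to its TQP.
 */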
232 static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
233 {
234 struct hnae3_handle *nic = &hdev->nic;
235 struct hnae3_knic_private_info *kinfo;
236 u16 new_tqps = hdev->num_tqps;
237 int i;
238
239 kinfo = &nic->kinfo;
240 kinfo->num_tc = 0;
241 kinfo->num_desc = hdev->num_desc;
242 kinfo->rx_buf_len = hdev->rx_buf_len;
243 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
244 if (hdev->hw_tc_map & BIT(i))
245 kinfo->num_tc++;
246
247 kinfo->rss_size
248 = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
249 new_tqps = kinfo->rss_size * kinfo->num_tc;
250 kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
251
252 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
253 sizeof(struct hnae3_queue *), GFP_KERNEL);
254 if (!kinfo->tqp)
255 return -ENOMEM;
256
257 for (i = 0; i < kinfo->num_tqps; i++) {
258 hdev->htqp[i].q.handle = &hdev->nic;
259 hdev->htqp[i].q.tqp_index = i;
260 kinfo->tqp[i] = &hdev->htqp[i].q;
261 }
262
263 return 0;
264 }
265
266 static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
267 {
268 int status;
269 u8 resp_msg;
270
271 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
272 0, false, &resp_msg, sizeof(u8));
273 if (status)
274 dev_err(&hdev->pdev->dev,
275 "VF failed to fetch link status(%d) from PF", status);
276 }
277
278 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
279 {
280 struct hnae3_handle *handle = &hdev->nic;
281 struct hnae3_client *client;
282
283 client = handle->client;
284
285 if (link_state != hdev->hw.mac.link) {
286 client->ops->link_status_change(handle, !!link_state);
287 hdev->hw.mac.link = link_state;
288 }
289 }
290
291 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
292 {
293 struct hnae3_handle *nic = &hdev->nic;
294 int ret;
295
296 nic->ae_algo = &ae_algovf;
297 nic->pdev = hdev->pdev;
298 nic->numa_node_mask = hdev->numa_node_mask;
299 nic->flags |= HNAE3_SUPPORT_VF;
300
301 if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
302 dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
303 hdev->ae_dev->dev_type);
304 return -EINVAL;
305 }
306
307 ret = hclgevf_knic_setup(hdev);
308 if (ret)
309 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
310 ret);
311 return ret;
312 }
313
314 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
315 {
316 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
317 hdev->num_msi_left += 1;
318 hdev->num_msi_used -= 1;
319 }
320
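/* Hand out up to vector_num unused MSI-X vectors (skipping the misc vector 0
 * slot) to the requesting client, recording the Linux IRQ number and the
 * per-vector interrupt control register address for each vector allocated.
 */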
321 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
322 struct hnae3_vector_info *vector_info)
323 {
324 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
325 struct hnae3_vector_info *vector = vector_info;
326 int alloc = 0;
327 int i, j;
328
329 vector_num = min(hdev->num_msi_left, vector_num);
330
331 for (j = 0; j < vector_num; j++) {
332 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
333 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
334 vector->vector = pci_irq_vector(hdev->pdev, i);
335 vector->io_addr = hdev->hw.io_base +
336 HCLGEVF_VECTOR_REG_BASE +
337 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
338 hdev->vector_status[i] = 0;
339 hdev->vector_irq[i] = vector->vector;
340
341 vector++;
342 alloc++;
343
344 break;
345 }
346 }
347 }
348 hdev->num_msi_left -= alloc;
349 hdev->num_msi_used += alloc;
350
351 return alloc;
352 }
353
354 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
355 {
356 int i;
357
358 for (i = 0; i < hdev->num_msi; i++)
359 if (vector == hdev->vector_irq[i])
360 return i;
361
362 return -EINVAL;
363 }
364
365 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
366 {
367 return HCLGEVF_RSS_KEY_SIZE;
368 }
369
370 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
371 {
372 return HCLGEVF_RSS_IND_TBL_SIZE;
373 }
374
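/* Push the software shadow of the RSS indirection table to hardware, one
 * firmware descriptor (HCLGEVF_RSS_CFG_TBL_SIZE entries) at a time.
 */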
375 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
376 {
377 const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
378 struct hclgevf_rss_indirection_table_cmd *req;
379 struct hclgevf_desc desc;
380 int status;
381 int i, j;
382
383 req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
384
385 for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
386 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
387 false);
388 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
389 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
390 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
391 req->rss_result[j] =
392 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
393
394 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
395 if (status) {
396 dev_err(&hdev->pdev->dev,
397 "VF failed(=%d) to set RSS indirection table\n",
398 status);
399 return status;
400 }
401 }
402
403 return 0;
404 }
405
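/* Program the per-TC RSS mode: every enabled TC gets the same queue count
 * (rss_size rounded up to a power of two, stored as its log2) and an offset
 * of rss_size * tc into the queue space.
 */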
406 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
407 {
408 struct hclgevf_rss_tc_mode_cmd *req;
409 u16 tc_offset[HCLGEVF_MAX_TC_NUM];
410 u16 tc_valid[HCLGEVF_MAX_TC_NUM];
411 u16 tc_size[HCLGEVF_MAX_TC_NUM];
412 struct hclgevf_desc desc;
413 u16 roundup_size;
414 int status;
415 int i;
416
417 req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
418
419 roundup_size = roundup_pow_of_two(rss_size);
420 roundup_size = ilog2(roundup_size);
421
422 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
423 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
424 tc_size[i] = roundup_size;
425 tc_offset[i] = rss_size * i;
426 }
427
428 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
429 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
430 hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
431 (tc_valid[i] & 0x1));
432 hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
433 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
434 hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
435 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
436 }
437 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
438 if (status)
439 dev_err(&hdev->pdev->dev,
440 "VF failed(=%d) to set rss tc mode\n", status);
441
442 return status;
443 }
444
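/* Read the RSS hash algorithm and, optionally, the hash key back from
 * hardware. The key is spread across three generic-config descriptors, so up
 * to three lookups are issued when the caller wants the key and a single one
 * when only the hash algorithm is needed.
 */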
445 static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
446 u8 *key)
447 {
448 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
449 struct hclgevf_rss_config_cmd *req;
450 int lkup_times = key ? 3 : 1;
451 struct hclgevf_desc desc;
452 int key_offset;
453 int key_size;
454 int status;
455
456 req = (struct hclgevf_rss_config_cmd *)desc.data;
457 lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
458
459 for (key_offset = 0; key_offset < lkup_times; key_offset++) {
460 hclgevf_cmd_setup_basic_desc(&desc,
461 HCLGEVF_OPC_RSS_GENERIC_CONFIG,
462 true);
463 req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
464
465 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
466 if (status) {
467 dev_err(&hdev->pdev->dev,
468 "failed to get hardware RSS cfg, status = %d\n",
469 status);
470 return status;
471 }
472
473 if (key_offset == 2)
474 key_size =
475 HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
476 else
477 key_size = HCLGEVF_RSS_HASH_KEY_NUM;
478
479 if (key)
480 memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
481 req->hash_key,
482 key_size);
483 }
484
485 if (hash) {
486 if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
487 *hash = ETH_RSS_HASH_TOP;
488 else
489 *hash = ETH_RSS_HASH_UNKNOWN;
490 }
491
492 return 0;
493 }
494
495 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
496 u8 *hfunc)
497 {
498 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
499 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
500 int i;
501
502 if (indir)
503 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
504 indir[i] = rss_cfg->rss_indirection_tbl[i];
505
506 return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
507 }
508
509 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
510 const u8 *key, const u8 hfunc)
511 {
512 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
513 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
514 int i;
515
516 /* update the shadow RSS table with user specified qids */
517 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
518 rss_cfg->rss_indirection_tbl[i] = indir[i];
519
520 /* update the hardware */
521 return hclgevf_set_rss_indir_table(hdev);
522 }
523
524 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
525 {
526 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
527 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
528
529 return rss_cfg->rss_size;
530 }
531
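/* Build and send the VF-to-PF mailbox message(s) that map (or unmap) the
 * rings in 'ring_chain' to an interrupt vector: msg[0] carries the opcode,
 * msg[1] the VF-local vector id, msg[2] the ring count, and ring type / TQP
 * index pairs follow. A new message is started whenever the mailbox payload
 * fills up.
 */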
532 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
533 int vector,
534 struct hnae3_ring_chain_node *ring_chain)
535 {
536 #define HCLGEVF_RING_NODE_VARIABLE_NUM 3
537 #define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM 3
538 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
539 struct hnae3_ring_chain_node *node;
540 struct hclge_mbx_vf_to_pf_cmd *req;
541 struct hclgevf_desc desc;
542 int i, vector_id;
543 int status;
544 u8 type;
545
546 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
547 vector_id = hclgevf_get_vector_index(hdev, vector);
548 if (vector_id < 0) {
549 dev_err(&handle->pdev->dev,
550 "Get vector index fail. ret =%d\n", vector_id);
551 return vector_id;
552 }
553
554 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
555 type = en ?
556 HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
557 req->msg[0] = type;
558 req->msg[1] = vector_id; /* vector_id should be id in VF */
559
560 i = 0;
561 for (node = ring_chain; node; node = node->next) {
562 i++;
563 		/* msg[2] is the number of rings carried in this message */
564 req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
565 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
566 req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
567 node->tqp_index;
568 if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
569 HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
570 HCLGEVF_RING_NODE_VARIABLE_NUM) {
571 req->msg[2] = i;
572
573 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
574 if (status) {
575 dev_err(&hdev->pdev->dev,
576 "Map TQP fail, status is %d.\n",
577 status);
578 return status;
579 }
580 i = 0;
581 hclgevf_cmd_setup_basic_desc(&desc,
582 HCLGEVF_OPC_MBX_VF_TO_PF,
583 false);
584 req->msg[0] = type;
585 req->msg[1] = vector_id;
586 }
587 }
588
589 if (i > 0) {
590 req->msg[2] = i;
591
592 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
593 if (status) {
594 dev_err(&hdev->pdev->dev,
595 "Map TQP fail, status is %d.\n", status);
596 return status;
597 }
598 }
599
600 return 0;
601 }
602
603 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
604 struct hnae3_ring_chain_node *ring_chain)
605 {
606 return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
607 }
608
609 static int hclgevf_unmap_ring_from_vector(
610 struct hnae3_handle *handle,
611 int vector,
612 struct hnae3_ring_chain_node *ring_chain)
613 {
614 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
615 int ret, vector_id;
616
617 vector_id = hclgevf_get_vector_index(hdev, vector);
618 if (vector_id < 0) {
619 dev_err(&handle->pdev->dev,
620 "Get vector index fail. ret =%d\n", vector_id);
621 return vector_id;
622 }
623
624 ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
625 if (ret) {
626 dev_err(&handle->pdev->dev,
627 "Unmap ring from vector fail. vector=%d, ret =%d\n",
628 vector_id,
629 ret);
630 return ret;
631 }
632
633 hclgevf_free_vector(hdev, vector);
634
635 return 0;
636 }
637
638 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
639 {
640 struct hclge_mbx_vf_to_pf_cmd *req;
641 struct hclgevf_desc desc;
642 int status;
643
644 req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
645
646 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
647 req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
648 req->msg[1] = en;
649
650 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
651 if (status)
652 dev_err(&hdev->pdev->dev,
653 "Set promisc mode fail, status is %d.\n", status);
654
655 return status;
656 }
657
658 static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
659 {
660 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
661
662 hclgevf_cmd_set_promisc_mode(hdev, en);
663 }
664
665 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
666 int stream_id, bool enable)
667 {
668 struct hclgevf_cfg_com_tqp_queue_cmd *req;
669 struct hclgevf_desc desc;
670 int status;
671
672 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
673
674 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
675 false);
676 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
677 req->stream_id = cpu_to_le16(stream_id);
678 req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
679
680 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
681 if (status)
682 dev_err(&hdev->pdev->dev,
683 "TQP enable fail, status =%d.\n", status);
684
685 return status;
686 }
687
688 static int hclgevf_get_queue_id(struct hnae3_queue *queue)
689 {
690 struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
691
692 return tqp->index;
693 }
694
695 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
696 {
697 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
698 struct hnae3_queue *queue;
699 struct hclgevf_tqp *tqp;
700 int i;
701
702 for (i = 0; i < hdev->num_tqps; i++) {
703 queue = handle->kinfo.tqp[i];
704 tqp = container_of(queue, struct hclgevf_tqp, q);
705 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
706 }
707 }
708
709 static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
710 {
711 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
712 u8 msg[2] = {0};
713
714 msg[0] = en;
715 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
716 HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
717 msg, 1, false, NULL, 0);
718 }
719
720 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
721 {
722 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
723
724 ether_addr_copy(p, hdev->hw.mac.mac_addr);
725 }
726
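/* Request a unicast MAC address change through the PF: the mailbox payload
 * carries the new address followed by the current one so the PF can replace
 * the old entry. The local copy is only updated if the request was sent
 * successfully.
 */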
727 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
728 {
729 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
730 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
731 u8 *new_mac_addr = (u8 *)p;
732 u8 msg_data[ETH_ALEN * 2];
733 int status;
734
735 ether_addr_copy(msg_data, new_mac_addr);
736 ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
737
738 status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
739 HCLGE_MBX_MAC_VLAN_UC_MODIFY,
740 msg_data, ETH_ALEN * 2,
741 false, NULL, 0);
742 if (!status)
743 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
744
745 return status;
746 }
747
748 static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
749 const unsigned char *addr)
750 {
751 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
752
753 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
754 HCLGE_MBX_MAC_VLAN_UC_ADD,
755 addr, ETH_ALEN, false, NULL, 0);
756 }
757
758 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
759 const unsigned char *addr)
760 {
761 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
762
763 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
764 HCLGE_MBX_MAC_VLAN_UC_REMOVE,
765 addr, ETH_ALEN, false, NULL, 0);
766 }
767
768 static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
769 const unsigned char *addr)
770 {
771 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
772
773 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
774 HCLGE_MBX_MAC_VLAN_MC_ADD,
775 addr, ETH_ALEN, false, NULL, 0);
776 }
777
778 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
779 const unsigned char *addr)
780 {
781 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
782
783 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
784 HCLGE_MBX_MAC_VLAN_MC_REMOVE,
785 addr, ETH_ALEN, false, NULL, 0);
786 }
787
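/* Ask the PF to add or remove a VLAN filter entry for this VF. The 5 byte
 * payload carries the kill flag, the VLAN id and the protocol; only 802.1Q
 * and VLAN ids up to 4095 are accepted.
 */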
788 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
789 __be16 proto, u16 vlan_id,
790 bool is_kill)
791 {
792 #define HCLGEVF_VLAN_MBX_MSG_LEN 5
793 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
794 u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
795
796 if (vlan_id > 4095)
797 return -EINVAL;
798
799 if (proto != htons(ETH_P_8021Q))
800 return -EPROTONOSUPPORT;
801
802 msg_data[0] = is_kill;
803 memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
804 memcpy(&msg_data[3], &proto, sizeof(proto));
805 return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
806 HCLGE_MBX_VLAN_FILTER, msg_data,
807 HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
808 }
809
810 static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
811 {
812 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
813 u8 msg_data[2];
814
815 memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
816
817 hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
818 NULL, 0);
819 }
820
821 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
822 {
823 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
824
825 return hdev->fw_version;
826 }
827
828 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
829 {
830 struct hclgevf_misc_vector *vector = &hdev->misc_vector;
831
832 vector->vector_irq = pci_irq_vector(hdev->pdev,
833 HCLGEVF_MISC_VECTOR_NUM);
834 vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
835 /* vector status always valid for Vector 0 */
836 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
837 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
838
839 hdev->num_msi_left -= 1;
840 hdev->num_msi_used += 1;
841 }
842
843 static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
844 {
845 if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
846 schedule_work(&hdev->mbx_service_task);
847 }
848
849 static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
850 {
851 if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
852 !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
853 schedule_work(&hdev->service_task);
854 }
855
856 static void hclgevf_service_timer(struct timer_list *t)
857 {
858 struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
859
860 mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
861
862 hclgevf_task_schedule(hdev);
863 }
864
865 static void hclgevf_mailbox_service_task(struct work_struct *work)
866 {
867 struct hclgevf_dev *hdev;
868
869 hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
870
871 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
872 return;
873
874 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
875
876 hclgevf_mbx_handler(hdev);
877
878 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
879 }
880
881 static void hclgevf_service_task(struct work_struct *work)
882 {
883 struct hclgevf_dev *hdev;
884
885 hdev = container_of(work, struct hclgevf_dev, service_task);
886
887 /* request the link status from the PF. PF would be able to tell VF
888 	 * about such updates in the future, so we might remove this later
889 */
890 hclgevf_request_link_info(hdev);
891
892 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
893 }
894
895 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
896 {
897 hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
898 }
899
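/* Decode the vector 0 interrupt source register. Only the mailbox (CMDQ RX)
 * event is recognised: its bit is cleared from the value handed back through
 * 'clearval' (later written to the source register) and true is returned;
 * anything else is just logged at debug level.
 */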
900 static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
901 {
902 u32 cmdq_src_reg;
903
904 /* fetch the events from their corresponding regs */
905 cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
906 HCLGEVF_VECTOR0_CMDQ_SRC_REG);
907
908 /* check for vector0 mailbox(=CMDQ RX) event source */
909 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
910 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
911 *clearval = cmdq_src_reg;
912 return true;
913 }
914
915 dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
916
917 return false;
918 }
919
920 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
921 {
922 writel(en ? 1 : 0, vector->addr);
923 }
924
925 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
926 {
927 struct hclgevf_dev *hdev = data;
928 u32 clearval;
929
930 hclgevf_enable_vector(&hdev->misc_vector, false);
931 if (!hclgevf_check_event_cause(hdev, &clearval))
932 goto skip_sched;
933
934 /* schedule the VF mailbox service task, if not already scheduled */
935 hclgevf_mbx_task_schedule(hdev);
936
937 hclgevf_clear_event_cause(hdev, clearval);
938
939 skip_sched:
940 hclgevf_enable_vector(&hdev->misc_vector, true);
941
942 return IRQ_HANDLED;
943 }
944
945 static int hclgevf_configure(struct hclgevf_dev *hdev)
946 {
947 int ret;
948
949 /* get queue configuration from PF */
950 ret = hclge_get_queue_info(hdev);
951 if (ret)
952 return ret;
953 /* get tc configuration from PF */
954 return hclgevf_get_tc_info(hdev);
955 }
956
957 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
958 {
959 struct hnae3_handle *roce = &hdev->roce;
960 struct hnae3_handle *nic = &hdev->nic;
961
962 roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
963
964 if (hdev->num_msi_left < roce->rinfo.num_vectors ||
965 hdev->num_msi_left == 0)
966 return -EINVAL;
967
968 roce->rinfo.base_vector =
969 hdev->vector_status[hdev->num_msi_used];
970
971 roce->rinfo.netdev = nic->kinfo.netdev;
972 roce->rinfo.roce_io_base = hdev->hw.io_base;
973
974 roce->pdev = nic->pdev;
975 roce->ae_algo = nic->ae_algo;
976 roce->numa_node_mask = nic->numa_node_mask;
977
978 return 0;
979 }
980
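/* Default RSS setup for the VF: spread the indirection table evenly across
 * the first rss_size_max queues, then push the table and the per-TC mode to
 * hardware.
 */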
981 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
982 {
983 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
984 int i, ret;
985
986 rss_cfg->rss_size = hdev->rss_size_max;
987
988 /* Initialize RSS indirect table for each vport */
989 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
990 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
991
992 ret = hclgevf_set_rss_indir_table(hdev);
993 if (ret)
994 return ret;
995
996 return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
997 }
998
999 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
1000 {
1001 	/* other VLAN config (e.g. VLAN TX/RX offload) would also be added
1002 * here later
1003 */
1004 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
1005 false);
1006 }
1007
1008 static int hclgevf_ae_start(struct hnae3_handle *handle)
1009 {
1010 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1011 int i, queue_id;
1012
1013 for (i = 0; i < handle->kinfo.num_tqps; i++) {
1014 /* ring enable */
1015 queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1016 if (queue_id < 0) {
1017 dev_warn(&hdev->pdev->dev,
1018 "Get invalid queue id, ignore it\n");
1019 continue;
1020 }
1021
1022 hclgevf_tqp_enable(hdev, queue_id, 0, true);
1023 }
1024
1025 /* reset tqp stats */
1026 hclgevf_reset_tqp_stats(handle);
1027
1028 hclgevf_request_link_info(hdev);
1029
1030 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1031 mod_timer(&hdev->service_timer, jiffies + HZ);
1032
1033 return 0;
1034 }
1035
1036 static void hclgevf_ae_stop(struct hnae3_handle *handle)
1037 {
1038 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1039 int i, queue_id;
1040
1041 for (i = 0; i < hdev->num_tqps; i++) {
1042 /* Ring disable */
1043 queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1044 if (queue_id < 0) {
1045 dev_warn(&hdev->pdev->dev,
1046 "Get invalid queue id, ignore it\n");
1047 continue;
1048 }
1049
1050 hclgevf_tqp_enable(hdev, queue_id, 0, false);
1051 }
1052
1053 /* reset tqp stats */
1054 hclgevf_reset_tqp_stats(handle);
1055 }
1056
1057 static void hclgevf_state_init(struct hclgevf_dev *hdev)
1058 {
1059 /* setup tasks for the MBX */
1060 INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
1061 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1062 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1063
1064 /* setup tasks for service timer */
1065 timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
1066
1067 INIT_WORK(&hdev->service_task, hclgevf_service_task);
1068 clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1069
1070 mutex_init(&hdev->mbx_resp.mbx_mutex);
1071
1072 /* bring the device down */
1073 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1074 }
1075
1076 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
1077 {
1078 set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1079
1080 if (hdev->service_timer.function)
1081 del_timer_sync(&hdev->service_timer);
1082 if (hdev->service_task.func)
1083 cancel_work_sync(&hdev->service_task);
1084 if (hdev->mbx_service_task.func)
1085 cancel_work_sync(&hdev->mbx_service_task);
1086
1087 mutex_destroy(&hdev->mbx_resp.mbx_mutex);
1088 }
1089
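/* Enable MSI/MSI-X for the VF: request up to HCLGEVF_MAX_VF_VECTOR_NUM
 * vectors (accepting however many the platform actually grants), then
 * allocate the per-vector status and IRQ tracking arrays.
 */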
1090 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
1091 {
1092 struct pci_dev *pdev = hdev->pdev;
1093 int vectors;
1094 int i;
1095
1096 hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
1097
1098 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1099 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1100 if (vectors < 0) {
1101 dev_err(&pdev->dev,
1102 "failed(%d) to allocate MSI/MSI-X vectors\n",
1103 vectors);
1104 return vectors;
1105 }
1106 if (vectors < hdev->num_msi)
1107 dev_warn(&hdev->pdev->dev,
1108 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1109 hdev->num_msi, vectors);
1110
1111 hdev->num_msi = vectors;
1112 hdev->num_msi_left = vectors;
1113 hdev->base_msi_vector = pdev->irq;
1114
1115 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1116 sizeof(u16), GFP_KERNEL);
1117 if (!hdev->vector_status) {
1118 pci_free_irq_vectors(pdev);
1119 return -ENOMEM;
1120 }
1121
1122 for (i = 0; i < hdev->num_msi; i++)
1123 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
1124
1125 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1126 sizeof(int), GFP_KERNEL);
1127 if (!hdev->vector_irq) {
1128 pci_free_irq_vectors(pdev);
1129 return -ENOMEM;
1130 }
1131
1132 return 0;
1133 }
1134
1135 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
1136 {
1137 struct pci_dev *pdev = hdev->pdev;
1138
1139 pci_free_irq_vectors(pdev);
1140 }
1141
1142 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
1143 {
1144 int ret = 0;
1145
1146 hclgevf_get_misc_vector(hdev);
1147
1148 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
1149 0, "hclgevf_cmd", hdev);
1150 if (ret) {
1151 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
1152 hdev->misc_vector.vector_irq);
1153 return ret;
1154 }
1155
1156 /* enable misc. vector(vector 0) */
1157 hclgevf_enable_vector(&hdev->misc_vector, true);
1158
1159 return ret;
1160 }
1161
1162 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
1163 {
1164 /* disable misc vector(vector 0) */
1165 hclgevf_enable_vector(&hdev->misc_vector, false);
1166 free_irq(hdev->misc_vector.vector_irq, hdev);
1167 hclgevf_free_vector(hdev, 0);
1168 }
1169
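/* Attach a client (KNIC, UNIC or RoCE) to this VF device. For a KNIC client
 * the RoCE client is also initialised if it has already registered and the
 * hardware supports it; for a RoCE client the base vector and netdev info is
 * filled in before its init_instance callback runs.
 */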
1170 static int hclgevf_init_instance(struct hclgevf_dev *hdev,
1171 struct hnae3_client *client)
1172 {
1173 int ret;
1174
1175 switch (client->type) {
1176 case HNAE3_CLIENT_KNIC:
1177 hdev->nic_client = client;
1178 hdev->nic.client = client;
1179
1180 ret = client->ops->init_instance(&hdev->nic);
1181 if (ret)
1182 return ret;
1183
1184 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
1185 struct hnae3_client *rc = hdev->roce_client;
1186
1187 ret = hclgevf_init_roce_base_info(hdev);
1188 if (ret)
1189 return ret;
1190 ret = rc->ops->init_instance(&hdev->roce);
1191 if (ret)
1192 return ret;
1193 }
1194 break;
1195 case HNAE3_CLIENT_UNIC:
1196 hdev->nic_client = client;
1197 hdev->nic.client = client;
1198
1199 ret = client->ops->init_instance(&hdev->nic);
1200 if (ret)
1201 return ret;
1202 break;
1203 case HNAE3_CLIENT_ROCE:
1204 hdev->roce_client = client;
1205 hdev->roce.client = client;
1206
1207 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
1208 ret = hclgevf_init_roce_base_info(hdev);
1209 if (ret)
1210 return ret;
1211
1212 ret = client->ops->init_instance(&hdev->roce);
1213 if (ret)
1214 return ret;
1215 }
1216 }
1217
1218 return 0;
1219 }
1220
1221 static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
1222 struct hnae3_client *client)
1223 {
1224 /* un-init roce, if it exists */
1225 if (hdev->roce_client)
1226 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
1227
1228 /* un-init nic/unic, if this was not called by roce client */
1229 if ((client->ops->uninit_instance) &&
1230 (client->type != HNAE3_CLIENT_ROCE))
1231 client->ops->uninit_instance(&hdev->nic, 0);
1232 }
1233
1234 static int hclgevf_register_client(struct hnae3_client *client,
1235 struct hnae3_ae_dev *ae_dev)
1236 {
1237 struct hclgevf_dev *hdev = ae_dev->priv;
1238
1239 return hclgevf_init_instance(hdev, client);
1240 }
1241
1242 static void hclgevf_unregister_client(struct hnae3_client *client,
1243 struct hnae3_ae_dev *ae_dev)
1244 {
1245 struct hclgevf_dev *hdev = ae_dev->priv;
1246
1247 hclgevf_uninit_instance(hdev, client);
1248 }
1249
1250 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
1251 {
1252 struct pci_dev *pdev = hdev->pdev;
1253 struct hclgevf_hw *hw;
1254 int ret;
1255
1256 ret = pci_enable_device(pdev);
1257 if (ret) {
1258 dev_err(&pdev->dev, "failed to enable PCI device\n");
1259 goto err_no_drvdata;
1260 }
1261
1262 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1263 if (ret) {
1264 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
1265 goto err_disable_device;
1266 }
1267
1268 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
1269 if (ret) {
1270 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
1271 goto err_disable_device;
1272 }
1273
1274 pci_set_master(pdev);
1275 hw = &hdev->hw;
1276 hw->hdev = hdev;
1277 hw->io_base = pci_iomap(pdev, 2, 0);
1278 if (!hw->io_base) {
1279 dev_err(&pdev->dev, "can't map configuration register space\n");
1280 ret = -ENOMEM;
1281 goto err_clr_master;
1282 }
1283
1284 return 0;
1285
1286 err_clr_master:
1287 pci_clear_master(pdev);
1288 pci_release_regions(pdev);
1289 err_disable_device:
1290 pci_disable_device(pdev);
1291 err_no_drvdata:
1292 pci_set_drvdata(pdev, NULL);
1293 return ret;
1294 }
1295
1296 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
1297 {
1298 struct pci_dev *pdev = hdev->pdev;
1299
1300 pci_iounmap(pdev, hdev->hw.io_base);
1301 pci_clear_master(pdev);
1302 pci_release_regions(pdev);
1303 pci_disable_device(pdev);
1304 pci_set_drvdata(pdev, NULL);
1305 }
1306
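/* Main VF initialisation path: bring up PCI and MSI/MSI-X, install the misc
 * (vector 0) interrupt, start the command queue, fetch the queue and TC
 * configuration from the PF, then set up TQPs, the multicast filter, RSS and
 * the VLAN defaults. Each failure unwinds what was done via the error labels.
 */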
1307 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
1308 {
1309 struct pci_dev *pdev = ae_dev->pdev;
1310 struct hclgevf_dev *hdev;
1311 int ret;
1312
1313 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1314 if (!hdev)
1315 return -ENOMEM;
1316
1317 hdev->pdev = pdev;
1318 hdev->ae_dev = ae_dev;
1319 ae_dev->priv = hdev;
1320
1321 ret = hclgevf_pci_init(hdev);
1322 if (ret) {
1323 dev_err(&pdev->dev, "PCI initialization failed\n");
1324 return ret;
1325 }
1326
1327 ret = hclgevf_init_msi(hdev);
1328 if (ret) {
1329 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
1330 goto err_irq_init;
1331 }
1332
1333 hclgevf_state_init(hdev);
1334
1335 ret = hclgevf_misc_irq_init(hdev);
1336 if (ret) {
1337 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
1338 ret);
1339 goto err_misc_irq_init;
1340 }
1341
1342 ret = hclgevf_cmd_init(hdev);
1343 if (ret)
1344 goto err_cmd_init;
1345
1346 ret = hclgevf_configure(hdev);
1347 if (ret) {
1348 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
1349 goto err_config;
1350 }
1351
1352 ret = hclgevf_alloc_tqps(hdev);
1353 if (ret) {
1354 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
1355 goto err_config;
1356 }
1357
1358 ret = hclgevf_set_handle_info(hdev);
1359 if (ret) {
1360 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
1361 goto err_config;
1362 }
1363
1364 /* Initialize VF's MTA */
1365 hdev->accept_mta_mc = true;
1366 ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
1367 if (ret) {
1368 dev_err(&hdev->pdev->dev,
1369 "failed(%d) to set mta filter mode\n", ret);
1370 goto err_config;
1371 }
1372
1373 /* Initialize RSS for this VF */
1374 ret = hclgevf_rss_init_hw(hdev);
1375 if (ret) {
1376 dev_err(&hdev->pdev->dev,
1377 "failed(%d) to initialize RSS\n", ret);
1378 goto err_config;
1379 }
1380
1381 ret = hclgevf_init_vlan_config(hdev);
1382 if (ret) {
1383 dev_err(&hdev->pdev->dev,
1384 "failed(%d) to initialize VLAN config\n", ret);
1385 goto err_config;
1386 }
1387
1388 pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
1389
1390 return 0;
1391
1392 err_config:
1393 hclgevf_cmd_uninit(hdev);
1394 err_cmd_init:
1395 hclgevf_misc_irq_uninit(hdev);
1396 err_misc_irq_init:
1397 hclgevf_state_uninit(hdev);
1398 hclgevf_uninit_msi(hdev);
1399 err_irq_init:
1400 hclgevf_pci_uninit(hdev);
1401 return ret;
1402 }
1403
1404 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
1405 {
1406 struct hclgevf_dev *hdev = ae_dev->priv;
1407
1408 hclgevf_cmd_uninit(hdev);
1409 hclgevf_misc_irq_uninit(hdev);
1410 hclgevf_state_uninit(hdev);
1411 hclgevf_uninit_msi(hdev);
1412 hclgevf_pci_uninit(hdev);
1413 ae_dev->priv = NULL;
1414 }
1415
1416 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
1417 {
1418 struct hnae3_handle *nic = &hdev->nic;
1419 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1420
1421 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
1422 }
1423
1424 /**
1425 * hclgevf_get_channels - Get the current channels enabled and max supported.
1426 * @handle: hardware information for network interface
1427 * @ch: ethtool channels structure
1428 *
1429 * We don't support separate tx and rx queues as channels. The other count
1430 * represents how many queues are being used for control. max_combined counts
1431 * how many queue pairs we can support. They may not be mapped 1 to 1 with
1432 * q_vectors since we support a lot more queue pairs than q_vectors.
1433 **/
1434 static void hclgevf_get_channels(struct hnae3_handle *handle,
1435 struct ethtool_channels *ch)
1436 {
1437 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1438
1439 ch->max_combined = hclgevf_get_max_channels(hdev);
1440 ch->other_count = 0;
1441 ch->max_other = 0;
1442 ch->combined_count = hdev->num_tqps;
1443 }
1444
1445 static const struct hnae3_ae_ops hclgevf_ops = {
1446 .init_ae_dev = hclgevf_init_ae_dev,
1447 .uninit_ae_dev = hclgevf_uninit_ae_dev,
1448 .init_client_instance = hclgevf_register_client,
1449 .uninit_client_instance = hclgevf_unregister_client,
1450 .start = hclgevf_ae_start,
1451 .stop = hclgevf_ae_stop,
1452 .map_ring_to_vector = hclgevf_map_ring_to_vector,
1453 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
1454 .get_vector = hclgevf_get_vector,
1455 .reset_queue = hclgevf_reset_tqp,
1456 .set_promisc_mode = hclgevf_set_promisc_mode,
1457 .get_mac_addr = hclgevf_get_mac_addr,
1458 .set_mac_addr = hclgevf_set_mac_addr,
1459 .add_uc_addr = hclgevf_add_uc_addr,
1460 .rm_uc_addr = hclgevf_rm_uc_addr,
1461 .add_mc_addr = hclgevf_add_mc_addr,
1462 .rm_mc_addr = hclgevf_rm_mc_addr,
1463 .get_stats = hclgevf_get_stats,
1464 .update_stats = hclgevf_update_stats,
1465 .get_strings = hclgevf_get_strings,
1466 .get_sset_count = hclgevf_get_sset_count,
1467 .get_rss_key_size = hclgevf_get_rss_key_size,
1468 .get_rss_indir_size = hclgevf_get_rss_indir_size,
1469 .get_rss = hclgevf_get_rss,
1470 .set_rss = hclgevf_set_rss,
1471 .get_tc_size = hclgevf_get_tc_size,
1472 .get_fw_version = hclgevf_get_fw_version,
1473 .set_vlan_filter = hclgevf_set_vlan_filter,
1474 .get_channels = hclgevf_get_channels,
1475 };
1476
1477 static struct hnae3_ae_algo ae_algovf = {
1478 .ops = &hclgevf_ops,
1479 .name = HCLGEVF_NAME,
1480 .pdev_id_table = ae_algovf_pci_tbl,
1481 };
1482
1483 static int hclgevf_init(void)
1484 {
1485 pr_info("%s is initializing\n", HCLGEVF_NAME);
1486
1487 return hnae3_register_ae_algo(&ae_algovf);
1488 }
1489
1490 static void hclgevf_exit(void)
1491 {
1492 hnae3_unregister_ae_algo(&ae_algovf);
1493 }
1494 module_init(hclgevf_init);
1495 module_exit(hclgevf_exit);
1496
1497 MODULE_LICENSE("GPL");
1498 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
1499 MODULE_DESCRIPTION("HCLGEVF Driver");
1500 MODULE_VERSION(HCLGEVF_MOD_VERSION);